/external/compiler-rt/test/asan/TestCases/Posix/

halt_on_error-torture.cc
    29: size_t nthreads = 10;  [variable]
    57: fprintf(stderr, "Syntax: %s nthreads niter\n", argv[0]);
    61: nthreads = (size_t)strtoul(argv[1], 0, 0);
    64: pthread_t *tids = new pthread_t[nthreads];
    66: for (size_t i = 0; i < nthreads; ++i) {
    73: for (size_t i = 0; i < nthreads; ++i) {
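The torture test above follows the standard spawn/join shape: parse nthreads from argv, create that many workers, then join them all. A minimal sketch of the pattern, with a hypothetical worker() standing in for the test's racing threads:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Hypothetical worker; the real test provokes ASan errors from its threads.
    static void *worker(void *) { return nullptr; }

    int main(int argc, char **argv) {
      if (argc != 3) {
        fprintf(stderr, "Syntax: %s nthreads niter\n", argv[0]);
        return 1;
      }
      size_t nthreads = (size_t)strtoul(argv[1], 0, 0);

      pthread_t *tids = new pthread_t[nthreads];
      for (size_t i = 0; i < nthreads; ++i)
        pthread_create(&tids[i], nullptr, worker, nullptr);  // spawn
      for (size_t i = 0; i < nthreads; ++i)
        pthread_join(tids[i], nullptr);                      // reap
      delete[] tids;
      return 0;
    }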
/external/autotest/client/tests/monotonic_time/src/

threads.c
    83: * Create nthreads threads.
    86: int create_threads(int nthreads, thread_func_t func, void *arg)  [argument]
    88: if (nthreads > MAX_THREADS)
    89: nthreads = MAX_THREADS;
    91: while (--nthreads >= 0) {

time_test.c
    235: int nthreads;  [local]
    257: nthreads = create_per_cpu_threads(cpus, test_loop, test);
    258: if (nthreads != ncpus) {
    260: ncpus, nthreads);
    261: if (nthreads) {
/external/fio/profiles/

tiobench.c
    9: static unsigned int nthreads = 1;  [variable]
    30: unsigned int nthreads;  [member in struct tiobench_options]
    78: .off1 = offsetof(struct tiobench_options, nthreads),
    110: sprintf(t_idx, "numjobs=%u", nthreads);
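tiobench.c registers nthreads with fio's option parser via offsetof, so the parser can deposit the parsed value directly into the profile's options struct. A generic sketch of that offsetof-driven descriptor pattern (the descriptor fields here are illustrative, not fio's actual struct fio_option layout):

    #include <stddef.h>

    struct tiobench_options {
      unsigned int pad;
      unsigned int nthreads;
    };

    // Minimal option descriptor: a name plus the byte offset of its target field.
    struct opt_desc {
      const char *name;
      size_t off1;
    };

    static const struct opt_desc opts[] = {
      {"threads", offsetof(struct tiobench_options, nthreads)},
    };

    // Write a parsed unsigned value into whichever field the descriptor names.
    static void store_uint(void *base, const struct opt_desc *o, unsigned int v) {
      *(unsigned int *)((char *)base + o->off1) = v;
    }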
/external/ltp/testcases/kernel/fs/fs_fill/

fs_fill.c
    35: static unsigned int nthreads;  [variable]
    78: pthread_t threads[nthreads];
    82: for (i = 0; i < nthreads; i++)
    96: for (i = 0; i < nthreads; i++)
    106: nthreads = tst_ncpus_conf() + 2;
    107: workers = SAFE_MALLOC(sizeof(struct worker) * nthreads);
    109: for (i = 0; i < nthreads; i++) {
    115: tst_res(TINFO, "Running %i writer threads", nthreads);
/external/tensorflow/tensorflow/core/kernels/

fused_batch_norm_op.cu.cc
    29: __global__ void VarianceToInvVarianceKernel(int nthreads, const T* input,  [argument]
    31: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    47: __global__ void InvVarianceToVarianceKernel(int nthreads, double epsilon,  [argument]
    49: CUDA_1D_KERNEL_LOOP(index, nthreads) {
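Nearly every CUDA kernel in this directory takes nthreads (the number of output elements) as its first argument and walks it with CUDA_1D_KERNEL_LOOP, TensorFlow's grid-stride loop. A sketch of the idiom, assuming the conventional expansion (the real definition lives in TensorFlow's CUDA kernel helper header):

    // Grid-stride loop: thread t handles t, t + stride, t + 2*stride, ... so
    // any launch configuration covers all nthreads elements.
    #define CUDA_1D_KERNEL_LOOP(i, n)                              \
      for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
           i += blockDim.x * gridDim.x)

    // An illustrative kernel in the same shape as the ones indexed here.
    template <typename T>
    __global__ void ScaleKernel(int nthreads, const T *in, T scale, T *out) {
      CUDA_1D_KERNEL_LOOP(index, nthreads) {
        out[index] = in[index] * scale;  // one output element per iteration
      }
    }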
inplace_ops_functor_gpu.cu.cc
    30: __global__ void DoParallelConcatOpKernel(int nthreads, const int64 rows,  [argument]
    33: CUDA_1D_KERNEL_LOOP(idx, nthreads) {
multinomial_op_gpu.cu.cc
    41: __global__ void MultinomialKernel(int32 nthreads, const int32 num_classes,  [argument]
    44: CUDA_1D_KERNEL_LOOP(index, nthreads) {
avgpooling_op_gpu.cu.cc
    42: __global__ void AvePoolBackwardNHWC(const int nthreads,  [argument]
    50: CUDA_1D_KERNEL_LOOP(index, nthreads) {
resize_nearest_neighbor_op_gpu.cu.cc
    36: ResizeNearestNeighborNHWC(const int nthreads, const T* bottom_data,
        const int in_height, const int in_width, const int channels,
        const int out_height, const int out_width, const float height_scale,
        const float width_scale, T* top_data)  [argument]
    41: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    65: ResizeNearestNeighborBackwardNHWC(const int nthreads, const T* top_diff,
        const int in_height, const int in_width, const int channels,
        const int out_height, const int out_width, const float height_scale,
        const float width_scale, T* bottom_diff)  [argument]
    70: CUDA_1D_KERNEL_LOOP(index, nthreads) {
spacetobatch_functor_gpu.cu.cc
    48: __global__ void S2B(const int32 nthreads, T* space_tensor_ptr,  [argument]
    50: CUDA_1D_KERNEL_LOOP(batch_tensor_idx, nthreads) {
tile_functor_gpu.cu.cc
    31: __global__ void TileKernel(int nthreads, const T* src, const int32* buf,  [argument]
    36: CUDA_1D_KERNEL_LOOP(o_idx, nthreads) {
transpose_functor_gpu.cu.cc
    35: __global__ void TransposeKernel(int nthreads, const T* src, const int32* buf,  [argument]
    40: CUDA_1D_KERNEL_LOOP(o_idx, nthreads) {
bias_op_gpu.cu.cc
    48: __global__ void BiasNHWCKernel(int32 nthreads, const T* input, const T* bias,  [argument]
    50: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    57: __global__ void BiasNCHWKernel(int32 nthreads, const T* input, const T* bias,  [argument]
    59: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    93: __global__ void BiasGradNHWC_Naive(int32 nthreads, const T* output_backprop,  [argument]
    95: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    103: __global__ void BiasGradNCHW_Naive(int32 nthreads, const T* output_backprop,  [argument]
    106: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    116: __global__ void BiasGradNHWC_SharedAtomics(int32 nthreads,  [argument]
    126: for (int32 index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
    ... (further matches elided)
depthtospace_op_gpu.cu.cc
    34: __global__ void D2S_NHWC(const int32 nthreads,  [argument]
    41: CUDA_1D_KERNEL_LOOP(out_idx, nthreads) {
    65: __global__ void D2S_NCHW(const int32 nthreads,  [argument]
    70: CUDA_1D_KERNEL_LOOP(input_idx, nthreads) {
    102: __global__ void D2S_NCHW_LOOP(const int32 nthreads,  [argument]
    108: CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) {
determinant_op_gpu.cu.cc
    86: __global__ void DeterminantFromPivotedLUKernel(int nthreads, int n,  [argument]
    98: CUDA_1D_KERNEL_LOOP(o_idx, nthreads) {
dilation_ops_gpu.cu.cc
    39: __global__ void DilationKernel(const int32 nthreads, const T* input_ptr,  [argument]
    46: CUDA_1D_KERNEL_LOOP(out_idx, nthreads) {
    79: DilationBackpropInputKernel(const int32 nthreads, const T* input_ptr,
        const T* filter_ptr, const T* out_backprop_ptr, int batch,
        int input_rows, int input_cols, int depth, int filter_rows,
        int filter_cols, int output_rows, int output_cols, int stride_rows,
        int stride_cols, int rate_rows, int rate_cols, int pad_top,
        int pad_left, T* in_backprop_ptr)  [argument]
    85: CUDA_1D_KERNEL_LOOP(out_idx, nthreads) {
    128: DilationBackpropFilterKernel(const int32 nthreads, const T* input_ptr,
        const T* filter_ptr, const T* out_backprop_ptr, int batch,
        int input_rows, int input_cols, int depth, int filter_rows,
        int filter_cols, int output_rows, int output_cols, int stride_rows,
        int stride_cols, int rate_rows, int rate_cols, int pad_top,
        int pad_left, T* filter_backprop_ptr)  [argument]
    134: CUDA_1D_KERNEL_LOOP(out_idx, nthreads) {
maxpooling_op_gpu.cu.cc
    46: // nthreads: the number of threads, which is equal to the output size.
    64: __global__ void MaxPoolForwardNCHW(const int nthreads, const dtype* bottom_data,  [argument]
    72: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    107: MaxPoolForwardNoMaskKernel_NCHW_VECT_C(const int nthreads,
        const int32* bottom_data, const int height, const int width,
        const int channels, const int pooled_height, const int pooled_width,
        const int kernel_h, const int kernel_w, const int stride_h,
        const int stride_w, const int pad_t, const int pad_l,
        int32* top_data)  [argument]
    115: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    139: __global__ void MaxPoolForwardNHWC(const int nthreads, const dtype* bottom_data,  [argument]
    147: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    179: MaxPoolBackwardNoMaskNHWC(const int nthreads, const dtype* bottom_data,
        const int height, const int width, const int channels,
        const int pooled_height, const int pooled_width, const int kernel_h,
        const int kernel_w, const int stride_h, const int stride_w,
        const int pad_t, const int pad_l, const dtype* top_diff,
        dtype* bottom_diff)  [argument]
    185: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    221: // nthreads
    238: MaxPoolBackward(const int nthreads, const dtype* top_diff,
        const int64* mask, const int top_offset, const int bottom_offset,
        dtype* bottom_diff)  [argument]
    263: MaxPoolGradBackwardNoMaskNCHW(const int nthreads, const dtype* bottom_data,
        const dtype* output_data, const int pooled_height,
        const int pooled_width, const int channels, const int height,
        const int width, const int kernel_h, const int kernel_w,
        const int stride_h, const int stride_w, const int pad_t,
        const int pad_l, const dtype* top_diff, dtype* bottom_diff)  [argument]
    303: MaxPoolGradBackwardNoMaskNHWC(const int nthreads, const dtype* bottom_data,
        const dtype* output_data, const int pooled_height,
        const int pooled_width, const int channels, const int height,
        const int width, const int kernel_h, const int kernel_w,
        const int stride_h, const int stride_w, const int pad_t,
        const int pad_l, const dtype* top_diff, dtype* bottom_diff)  [argument]
    362: MaxPoolGradBackward(const int nthreads, const dtype* top_diff,
        const int64* mask, const int top_offset, const int bottom_offset,
        dtype* bottom_diff)  [argument]
    ... (further matches elided)
pooling_ops_3d_gpu.cu.cc
    30: MaxPoolGradBackwardNoMaskNCDHW(const int nthreads, const dtype* bottom_data,
        const dtype* output_data, const int pooled_plane,
        const int pooled_height, const int pooled_width, const int channels,
        const int plane, const int height, const int width,
        const int kernel_p, const int kernel_h, const int kernel_w,
        const int stride_p, const int stride_h, const int stride_w,
        const int pad_p, const int pad_t, const int pad_l,
        const dtype* top_diff, dtype* bottom_diff)  [argument]
    38: CUDA_1D_KERNEL_LOOP(index, nthreads) {
    80: MaxPoolGradBackwardNoMaskNDHWC(const int nthreads, const dtype* bottom_data,
        const dtype* output_data, const int pooled_plane,
        const int pooled_height, const int pooled_width, const int channels,
        const int plane, const int height, const int width,
        const int kernel_p, const int kernel_h, const int kernel_w,
        const int stride_p, const int stride_h, const int stride_w,
        const int pad_p, const int pad_t, const int pad_l,
        const dtype* top_diff, dtype* bottom_diff)  [argument]
    88: CUDA_1D_KERNEL_LOOP(index, nthreads) {
resize_bilinear_op_gpu.cu.cc
    36: __global__ void ResizeBilinearKernel(const int32 nthreads, const T* images,  [argument]
    41: CUDA_1D_KERNEL_LOOP(out_idx, nthreads) {
    87: ResizeBilinearGradKernel(const int32 nthreads, const float* input_grad,
        float height_scale, float width_scale, int batch,
        int original_height, int original_width, int channels,
        int resized_height, int resized_width, T* output_grad)  [argument]
    91: CUDA_1D_KERNEL_LOOP(in_idx, nthreads) {
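A detail shared by the pooling, resize, and bias kernels above: the flat loop index is decomposed back into tensor coordinates. For NHWC layout (channels fastest-varying) the decomposition is a chain of div/mod operations; a sketch of the idiom, not the exact TensorFlow code:

    // Recover (n, h, w, c) from a flat NHWC index.  channels varies fastest,
    // then width, then height; the batch index is whatever remains.
    __device__ inline void FlatToNHWC(int index, int height, int width,
                                      int channels, int *n, int *h, int *w,
                                      int *c) {
      *c = index % channels;
      *w = (index / channels) % width;
      *h = (index / channels / width) % height;
      *n = index / (channels * width * height);
    }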
/external/python/cpython2/Python/

thread_sgi.h
    18: static int nthreads; /* protected by count_lock */  [variable]
    158: nthreads++;
    182: nthreads--;
    190: if (nthreads < 0) {
    196: dprintf(("waiting for other threads (%d)\n", nthreads));
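thread_sgi.h keeps nthreads under count_lock and has the shutting-down thread wait until the count drains. A minimal sketch of that pattern using POSIX primitives (the SGI port uses its own lock type, so this is an approximation, not the actual code):

    #include <pthread.h>

    static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t count_cv = PTHREAD_COND_INITIALIZER;
    static int nthreads; /* protected by count_lock */

    static void thread_started(void) {
      pthread_mutex_lock(&count_lock);
      nthreads++;
      pthread_mutex_unlock(&count_lock);
    }

    static void thread_exited(void) {
      pthread_mutex_lock(&count_lock);
      if (--nthreads == 0)
        pthread_cond_signal(&count_cv); /* wake the waiter below */
      pthread_mutex_unlock(&count_lock);
    }

    static void wait_for_other_threads(void) {
      pthread_mutex_lock(&count_lock);
      while (nthreads > 0) /* "waiting for other threads (%d)" */
        pthread_cond_wait(&count_cv, &count_lock);
      pthread_mutex_unlock(&count_lock);
    }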
/external/tensorflow/tensorflow/cc/training/

queue_runner.cc
    71: int nthreads = runs_;  [local]
    74: nthreads++;
    77: Env::Default(), SanitizeThreadSuffix(queue_name_), nthreads));
/external/jemalloc/include/jemalloc/internal/

ctl.h
    36: unsigned nthreads;  [member in struct ctl_arena_stats_s]
/external/ltp/testcases/open_posix_testsuite/stress/threads/pthread_create/

s-c1.c
    111: int nthreads;  [member in struct __mes_t]
    154: int nthreads, ctl, i, tmp;  [local]
    204: nthreads = 0;
    254: pthread_create(&th[nthreads],
    265: nthreads++;
    267: /* FAILED if error is != EAGAIN or nthreads > PTHREAD_THREADS_MAX */
    275: if (nthreads > my_max) {
    304: /* add to the measure list if nthreads % resolution == 0 */
    305: if ((nthreads % RESOLUTION) == 0) {
    314: m_tmp->nthreads
    ... (further matches elided)
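s-c1.c stresses pthread_create by looping until creation fails, accepting only EAGAIN (resource exhaustion) as a legitimate stop, and recording a measurement every RESOLUTION threads. A simplified sketch of that loop (the cap and the parked worker are illustrative; the real test keeps a measurement list):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    enum { MAX_CREATED = 100000, RESOLUTION = 100 };

    // Park forever so created threads stay alive and the count keeps growing.
    static void *parked(void *arg) {
      for (;;)
        pause();
      return arg;
    }

    int main(void) {
      static pthread_t th[MAX_CREATED];
      int nthreads = 0;

      while (nthreads < MAX_CREATED) {
        int ctl = pthread_create(&th[nthreads], NULL, parked, NULL);
        if (ctl != 0) {
          if (ctl != EAGAIN) /* FAILED if error is != EAGAIN */
            fprintf(stderr, "unexpected pthread_create error: %d\n", ctl);
          break;
        }
        nthreads++;
        if (nthreads % RESOLUTION == 0) /* add to the measure list */
          printf("created %d threads so far\n", nthreads);
      }
      return 0;
    }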
/external/ltp/testcases/realtime/perf/latency/

pthread_cond_many.c
    55: int nthreads = 0;  [variable]
    186: void test_signal(long iter, long nthreads)  [argument]
    197: stats_container_init(&dat, iter * nthreads);
    199: pt = malloc(sizeof(*pt) * nthreads);
    204: for (j = 0; j < nthreads; j++) {
    208: for (i = 0; i < (iter - 1) * nthreads; i += nthreads) {
    209: for (j = 0, k = i; j < nthreads; j++, k++) {
    219: for (j = 0; j < nthreads; j++) {
    231: for (i = 0; i < iter * nthreads;
    ... (further matches elided)
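pthread_cond_many.c times how long pthread_cond_signal takes to wake each of nthreads waiters. The essential measurement, sketched for a single waiter (the test's stats_container bookkeeping is replaced by one latency value; the names here are illustrative):

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int ready;
    static struct timespec t_signal;

    // Waiter: block on the condvar, then compute wake-up latency in ns.
    static void *waiter(void *arg) {
      struct timespec t_wake;
      pthread_mutex_lock(&lock);
      while (!ready)
        pthread_cond_wait(&cv, &lock);
      pthread_mutex_unlock(&lock);
      clock_gettime(CLOCK_MONOTONIC, &t_wake);
      *(long *)arg = (t_wake.tv_sec - t_signal.tv_sec) * 1000000000L +
                     (t_wake.tv_nsec - t_signal.tv_nsec);
      return NULL;
    }

    // Signaler: timestamp the signal so the waiter can subtract.
    static void signal_one(void) {
      pthread_mutex_lock(&lock);
      ready = 1;
      clock_gettime(CLOCK_MONOTONIC, &t_signal);
      pthread_cond_signal(&cv);
      pthread_mutex_unlock(&lock);
    }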