Searched refs:block_size (Results 1 - 25 of 413) sorted by relevance


/external/valgrind/none/tests/ppc32/
data-cache-instructions.c
21 int block_size, test_block_size = 4 * MAX_DCBZL_SZB, err; local
34 for (block_size = 0, p = rb; (p - rb) < test_block_size; p++)
36 block_size++;
37 assert(block_size == 16 || block_size == 32 || block_size == 64 || block_size == 128);
40 return block_size;
44 static void test_dcbzl_at(char *addr, char *buffer, int block_size) argument
48 /* Note: Assumption is that the length of buffer is three times the block_size
64 int block_size; local
[all...]
data-cache-instructions.stdout.exp
2 Passed dcbzl test at un-aligned (1 modulo block_size) address within the test block.
3 Passed dcbzl test at un-aligned ((block_size - 1) modulo block_size) address within the test block.
/external/valgrind/none/tests/ppc64/
data-cache-instructions.c
21 int block_size, test_block_size = 4 * MAX_DCBZL_SZB, err; local
34 for (block_size = 0, p = rb; (p - rb) < test_block_size; p++)
36 block_size++;
37 assert(block_size == 16 || block_size == 32 || block_size == 64 || block_size == 128);
40 return block_size;
44 static void test_dcbzl_at(char *addr, char *buffer, int block_size) argument
48 /* Note: Assumption is that the length of buffer is three times the block_size
64 int block_size; local
[all...]
data-cache-instructions.stdout.exp
2 Passed dcbzl test at un-aligned (1 modulo block_size) address within the test block.
3 Passed dcbzl test at un-aligned ((block_size - 1) modulo block_size) address within the test block.
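Note: the four valgrind entries above probe the data-cache block size at runtime. The test fills a buffer with a non-zero sentinel, issues a single dcbzl inside it, and counts how many bytes came back zero; that count must be 16, 32, 64 or 128. A minimal C++ sketch of the probing loop, assuming the dcbzl asm operand syntax shown in the comment (the real test uses its own PPC inline-asm wrapper):

#include <cassert>
#include <cstring>

// Stand-in for the PPC dcbzl instruction, which zeroes the whole data-cache
// block containing addr. The asm syntax below is an assumption.
void zero_one_cache_block(char *addr) {
#if defined(__powerpc__) || defined(__powerpc64__)
  __asm__ __volatile__("dcbzl 0,%0" : : "r"(addr) : "memory");
#else
  std::memset(addr, 0, 32);  // fallback so the sketch still runs off-PPC
#endif
}

int probe_cache_block_size() {
  constexpr int kMaxBlock = 128;            // MAX_DCBZL_SZB in the test
  constexpr int kTestSize = 4 * kMaxBlock;  // buffer spans several blocks
  static char rb[kTestSize];

  std::memset(rb, 0xff, sizeof rb);         // non-zero sentinel everywhere
  zero_one_cache_block(rb);                 // zeroes exactly one cache block

  int block_size = 0;                       // zeroed bytes == block size
  for (char *p = rb; p - rb < kTestSize; ++p)
    if (*p == 0) block_size++;

  assert(block_size == 16 || block_size == 32 ||
         block_size == 64 || block_size == 128);
  return block_size;
}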
/external/tensorflow/tensorflow/contrib/boosted_trees/lib/utils/
parallel_for.cc
32 const int64 block_size = (batch_size + num_shards - 1) / num_shards; local
33 CHECK_GT(block_size, 0);
34 const int num_shards_used = (batch_size + block_size - 1) / block_size;
36 for (int64 start = block_size; start < batch_size; start += block_size) {
37 auto end = std::min(start + block_size, batch_size);
45 do_work(0, std::min(block_size, batch_size));
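Note: the parallel_for hit sizes its shards with ceiling division, then derives how many shards are actually used from the resulting block_size. For example, batch_size = 10 over num_shards = 4 gives block_size = 3 and four ranges [0,3), [3,6), [6,9), [9,10). A compile-time check of that arithmetic (the concrete numbers are illustrative, not from the source):

#include <cstdint>

constexpr std::int64_t ceil_div(std::int64_t a, std::int64_t b) {
  return (a + b - 1) / b;
}

constexpr std::int64_t batch_size = 10;   // illustrative values
constexpr std::int64_t num_shards = 4;
constexpr std::int64_t block_size = ceil_div(batch_size, num_shards);       // 3
constexpr std::int64_t num_shards_used = ceil_div(batch_size, block_size);  // 4

static_assert(block_size == 3 && num_shards_used == 4, "shard sizing");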
/external/tensorflow/tensorflow/python/kernel_tests/
spacetodepth_op_test.py
37 def _testOne(self, inputs, block_size, outputs):
41 x_tf = array_ops.space_to_depth(input_nhwc, block_size)
46 x_tf = array_ops.space_to_depth(input_nhwc, block_size)
51 input_nchw, block_size, data_format="NCHW")
57 block_size = 2
59 self._testOne(x_np, block_size, x_out)
66 block_size = 2
69 self._testOne(x_np, block_size, x_out)
76 block_size = 4
78 self._testOne(x_np, block_size, x_ou
[all...]
depthtospace_op_test.py
38 def _testOne(self, inputs, block_size, outputs):
42 x_tf = array_ops.depth_to_space(input_nhwc, block_size)
47 x_tf = array_ops.depth_to_space(input_nhwc, block_size)
52 input_nchw, block_size, data_format="NCHW")
58 block_size = 2
60 self._testOne(x_np, block_size, x_out)
69 block_size = 2
74 self._testOne(x_np, block_size, x_out)
77 block_size = 2
91 self._testOne(x_np, block_size, x_ou
[all...]
/external/squashfs-tools/squashfs-tools/
compressor.h
47 int block_size, int datablock)
51 return comp->init(stream, block_size, datablock);
56 void *dest, void *src, int size, int block_size, int *error)
58 return comp->compress(strm, dest, src, size, block_size, error);
63 void *src, int size, int block_size, int *error)
65 return comp->uncompress(dest, src, size, block_size, error);
83 static inline int compressor_options_post(struct compressor *comp, int block_size) argument
87 return comp->options_post(block_size);
92 int block_size, int *size)
96 return comp->dump_options(block_size, siz
46 compressor_init(struct compressor *comp, void **stream, int block_size, int datablock) argument
55 compressor_compress(struct compressor *comp, void *strm, void *dest, void *src, int size, int block_size, int *error) argument
62 compressor_uncompress(struct compressor *comp, void *dest, void *src, int size, int block_size, int *error) argument
91 compressor_dump_options(struct compressor *comp, int block_size, int *size) argument
100 compressor_extract_options(struct compressor *comp, int block_size, void *buffer, int size) argument
109 compressor_check_options(struct compressor *comp, int block_size, void *buffer, int size) argument
[all...]
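Note: the compressor.h hits are thin inline wrappers that forward block_size to whichever backend the struct compressor points at. A cut-down C++ sketch of that dispatch pattern (hypothetical names; the real squashfs-tools interface has more fields and error handling):

#include <cstdio>
#include <cstring>

// Hypothetical, minimal version of the function-pointer interface the
// squashfs-tools wrappers forward to.
struct Compressor {
  const char *name;
  int (*compress)(void *dest, const void *src, int size, int block_size, int *error);
};

inline int compressor_compress(const Compressor *comp, void *dest, const void *src,
                               int size, int block_size, int *error) {
  return comp->compress(dest, src, size, block_size, error);  // plain dispatch
}

// A do-nothing backend just to exercise the dispatch.
int copy_compress(void *dest, const void *src, int size, int /*block_size*/, int *error) {
  std::memcpy(dest, src, size);
  *error = 0;
  return size;
}

int main() {
  const Compressor copy{"copy", copy_compress};
  char in[16] = "squashfs block", out[16];
  int err = 0;
  int n = compressor_compress(&copy, out, in, sizeof in, 131072, &err);
  std::printf("%s: %d bytes, err=%d\n", copy.name, n, err);
}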
/external/tensorflow/tensorflow/core/util/
work_sharder.cc
50 // Each shard contains up to "block_size" units. [0, total) is sharded
52 // [0, block_size), [block_size, 2*block_size), ...
55 // block_size.
56 const int64 block_size = (total + num_shards - 1) / num_shards; local
57 CHECK_GT(block_size, 0); // total > 0 guarantees this.
58 if (block_size >= total) {
62 const int num_shards_used = (total + block_size - 1) / block_size;
[all...]
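Note: work_sharder's comment spells out the scheme: split [0, total) into ranges [0, block_size), [block_size, 2*block_size), ... and hand each range to a worker, running everything inline when block_size >= total. A self-contained sketch of that dispatch using std::thread (the real Shard() uses a thread pool and a cost model to choose block_size):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

// Shard [0, total) into ranges of at most block_size units and run do_work on each.
void shard(std::int64_t total, std::int64_t block_size,
           const std::function<void(std::int64_t, std::int64_t)> &do_work) {
  if (block_size >= total) {  // cheap case: run the whole range inline
    do_work(0, total);
    return;
  }
  std::vector<std::thread> workers;
  for (std::int64_t start = 0; start < total; start += block_size) {
    const std::int64_t end = std::min(start + block_size, total);
    workers.emplace_back(do_work, start, end);  // one [start, end) range per worker
  }
  for (auto &t : workers) t.join();
}

int main() {
  shard(10, 3, [](std::int64_t s, std::int64_t e) {
    std::printf("[%lld, %lld)\n", (long long)s, (long long)e);
  });
}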
/external/ltp/testcases/kernel/syscalls/fallocate/
fallocate04.c
37 static size_t block_size; variable
52 block_size = file_stat.st_blksize;
53 buf_size = NUM_OF_BLOCKS * block_size;
73 memset(buf + i * block_size, 'a' + i, block_size);
126 block_size, block_size) == -1) {
137 if (ret != (ssize_t)block_size) {
158 if ((alloc_size0 - block_size) != alloc_size1)
164 memset(exp_buf + block_size,
[all...]
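Note: fallocate04 sizes its test buffer from the filesystem's reported block size (st_blksize) and fills each block with a distinct byte so later hole-punching is visible. A short sketch of that setup, assuming Linux and a hypothetical scratch file path; the block count is illustrative, not the test's NUM_OF_BLOCKS:

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  const char *path = "scratch.bin";  // hypothetical scratch file
  int fd = open(path, O_CREAT | O_RDWR, 0600);
  if (fd < 0) { perror("open"); return 1; }

  struct stat st;
  fstat(fd, &st);
  const size_t block_size = st.st_blksize;  // filesystem's preferred block size
  const int num_blocks = 4;                 // illustrative
  std::vector<char> buf(num_blocks * block_size);

  // Give every block its own fill byte, like the memset loop in the hit.
  for (int i = 0; i < num_blocks; ++i)
    std::memset(buf.data() + i * block_size, 'a' + i, block_size);

  ssize_t n = write(fd, buf.data(), buf.size());
  close(fd);
  std::printf("block_size=%zu, wrote %zd bytes\n", block_size, n);
}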
/external/autotest/client/site_tests/platform_DMVerityCorruption/
platform_DMVerityCorruption.py
13 def mod_zerofill_block(self, run_count, backing_path, block_size,
16 run_count, backing_path, block_size, block_count))
19 verity_utils.system(dd_cmd % (backing_path, block_size, run_count))
21 def mod_Afill_hash_block(self, run_count, backing_path, block_size,
24 run_count, backing_path, block_size, block_count))
26 dev.seek(block_count * block_size, os.SEEK_SET)
27 dev.seek(run_count * block_size, os.SEEK_CUR)
28 dev.write('A' * block_size)
/external/libvpx/libvpx/vp9/encoder/arm/neon/
vp9_error_neon.c
17 int block_size) {
20 assert(block_size >= 8);
21 assert((block_size % 8) == 0);
37 block_size -= 8;
38 } while (block_size != 0);
16 vp9_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff, int block_size) argument
/external/autotest/client/site_tests/platform_DMVerityBitCorruption/
platform_DMVerityBitCorruption.py
17 def mod_tweak_block(self, run_count, backing_path, block_size,
20 run_count, backing_path, block_size, block_count))
23 dev.seek(run_count * block_size + self._adjustment)
25 dev.seek(run_count * block_size + self._adjustment)
29 def mod_tweak_hash_block(self, run_count, backing_path, block_size,
32 run_count, backing_path, block_size, block_count))
35 dev.seek(block_count * block_size, os.SEEK_SET)
36 dev.seek(run_count * block_size + self._adjustment, os.SEEK_CUR)
39 dev.seek(block_count * block_size, os.SEEK_SET)
40 dev.seek(run_count * block_size
[all...]
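Note: both DMVerity tests reduce to the same seek arithmetic: data block n starts at byte n * block_size, and the hash area starts right after the last data block at block_count * block_size, so hash block n sits at block_count * block_size + n * block_size. A small C++ sketch of overwriting one such block (purely illustrative; the names are not the autotest API, and it should not be pointed at a real verity-backed device):

#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

// Overwrite block |run_count| with |fill|. If |past_data_blocks| is set, skip
// the data area first (block_count blocks), i.e. target the hash area.
void clobber_block(const std::string &backing_path, std::uint64_t run_count,
                   std::uint64_t block_size, std::uint64_t block_count,
                   bool past_data_blocks, char fill) {
  std::fstream dev(backing_path, std::ios::in | std::ios::out | std::ios::binary);
  std::uint64_t offset = run_count * block_size;
  if (past_data_blocks) offset += block_count * block_size;  // hash area
  dev.seekp(static_cast<std::streamoff>(offset));
  std::vector<char> block(block_size, fill);  // e.g. 'A' or '\0', as in the tests
  dev.write(block.data(), block.size());
}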
/external/tensorflow/tensorflow/core/platform/cloud/
file_block_cache_test.cc
56 // If block_size, max_bytes, or both are zero, the cache is a pass-through.
89 for (size_t block_size = 2; block_size <= 4; block_size++) {
92 FileBlockCache cache(block_size, block_size, 0, fetcher);
94 for (size_t n = block_size - 2; n <= block_size + 2; n++) {
100 EXPECT_EQ(got.size(), n) << "block size = " << block_size
105 << "block size = " << block_size << ", offse
121 const size_t block_size = 16; local
151 const size_t block_size = 16; local
197 const size_t block_size = 16; local
220 const size_t block_size = 16; local
446 const int block_size = 8; local
466 const size_t block_size = 16; local
[all...]
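Note: the FileBlockCache tests read n bytes at arbitrary offsets near block boundaries, so internally a read has to be decomposed into block-aligned fetches keyed by offset / block_size. A minimal sketch of that decomposition (names are hypothetical, not the tensorflow API):

#include <cstddef>
#include <cstdio>
#include <vector>

// Return the block-aligned offsets that a read of n bytes at |offset| touches.
std::vector<size_t> blocks_for_read(size_t offset, size_t n, size_t block_size) {
  std::vector<size_t> keys;
  if (n == 0) return keys;
  const size_t first = (offset / block_size) * block_size;
  const size_t last = ((offset + n - 1) / block_size) * block_size;
  for (size_t b = first; b <= last; b += block_size) keys.push_back(b);
  return keys;
}

int main() {
  // A 5-byte read at offset 14 with block_size 16 touches blocks 0 and 16.
  for (size_t b : blocks_for_read(14, 5, 16)) std::printf("block %zu\n", b);
}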
/external/tensorflow/tensorflow/core/kernels/
depthtospace_op_gpu.cu.cc
36 const int block_size, const int batch_size,
50 const int in_h = h / block_size;
51 const int offset_h = h % block_size;
52 const int in_w = w / block_size;
53 const int offset_w = w % block_size;
54 const int offset_d = (offset_h * block_size + offset_w) * output_depth;
67 const int block_size, const int input_width,
84 const int n_bY = n_bY_bX / block_size;
85 const int bX = n_bY_bX - n_bY * block_size;
87 const int n = n_bY / block_size;
34 D2S_NHWC(const int32 nthreads, const dtype* __restrict__ input_ptr, const int block_size, const int batch_size, const int input_height, const int input_width, const int input_depth, const int output_height, const int output_width, const int output_depth, dtype* __restrict__ output_ptr) argument
65 D2S_NCHW(const int32 nthreads, const dtype* __restrict__ input_ptr, const int block_size, const int input_width, const int output_depth_by_input_height, dtype* __restrict__ output_ptr) argument
149 operator ()(const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, int block_size, typename TTypes<T, 4>::Tensor output) argument
167 operator ()(const GPUDevice& d, typename TTypes<T, 5>::ConstTensor input, int block_size, typename TTypes<T, 5>::Tensor output) argument
175 operator ()(const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, int block_size, typename TTypes<T, 4>::Tensor output) argument
221 operator ()(const GPUDevice& d, typename TTypes<T, 5>::ConstTensor input, int block_size, typename TTypes<T, 5>::Tensor output) argument
[all...]
spacetodepth_op_gpu.cu.cc
34 const int block_size, const int batch_size,
48 const int out_h = h / block_size;
49 const int offset_h = h % block_size;
50 const int out_w = w / block_size;
51 const int offset_w = w % block_size;
52 const int offset_d = (offset_h * block_size + offset_w) * input_depth;
66 const int block_size, const int output_width,
78 const int n_iC_oY_bY_oX = input_idx / block_size;
79 const int bX = input_idx - n_iC_oY_bY_oX * block_size;
84 const int n_iC_oY = n_iC_oY_bY / block_size;
33 S2D_NHWC(const int32 nthreads, const dtype* input_ptr, const int block_size, const int batch_size, const int input_height, const int input_width, const int input_depth, const int output_height, const int output_width, const int output_depth, dtype* output_ptr) argument
64 S2D_NCHW(const int32 nthreads, const dtype* __restrict__ input_ptr, const int block_size, const int output_width, const int input_depth_by_output_height, dtype* __restrict__ output_ptr) argument
145 operator ()(const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, int block_size, typename TTypes<T, 4>::Tensor output) argument
163 operator ()(const GPUDevice& d, typename TTypes<T, 5>::ConstTensor input, int block_size, typename TTypes<T, 5>::Tensor output) argument
171 operator ()(const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, int block_size, typename TTypes<T, 4>::Tensor output) argument
217 operator ()(const GPUDevice& d, typename TTypes<T, 5>::ConstTensor input, int block_size, typename TTypes<T, 5>::Tensor output) argument
[all...]
depthtospace_op.h
30 // 'block_size', and divide the depth dimension by (block_size * block_size)
46 int block_size, typename TTypes<T, 4>::Tensor output);
50 int block_size, typename TTypes<T, 5>::Tensor output);
spacetodepth_op.h
31 // 'block_size', and multiply the depth dimension size by
32 // (block_size * block_size). The offset within each block_size * block_size
47 int block_size, typename TTypes<T, 4>::Tensor output);
51 int block_size, typename TTypes<T, 5>::Tensor output);
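Note: together the two headers and the GPU kernels above pin down the SpaceToDepth mapping: each block_size x block_size spatial tile moves into the depth dimension, with the within-tile offset placed at (offset_h * block_size + offset_w) * input_depth, and DepthToSpace is the exact inverse. A plain CPU reference of that NHWC index math (a sketch, not the tensorflow kernel):

#include <cassert>
#include <vector>

// NHWC space_to_depth reference: output shape is
// [batch, H/block, W/block, C * block * block].
std::vector<float> space_to_depth_nhwc(const std::vector<float> &in, int batch,
                                       int H, int W, int C, int block) {
  assert(H % block == 0 && W % block == 0);
  const int oH = H / block, oW = W / block, oC = C * block * block;
  std::vector<float> out(static_cast<size_t>(batch) * oH * oW * oC);
  for (int b = 0; b < batch; ++b)
    for (int h = 0; h < H; ++h)
      for (int w = 0; w < W; ++w)
        for (int c = 0; c < C; ++c) {
          const int oh = h / block, offset_h = h % block;
          const int ow = w / block, offset_w = w % block;
          const int oc = (offset_h * block + offset_w) * C + c;
          const size_t in_idx = ((static_cast<size_t>(b) * H + h) * W + w) * C + c;
          const size_t out_idx = ((static_cast<size_t>(b) * oH + oh) * oW + ow) * oC + oc;
          out[out_idx] = in[in_idx];
        }
  return out;
}

With a 1x2x2x1 input and block = 2 this yields a 1x1x1x4 output, the kind of case the block_size = 2 Python tests above exercise.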
/external/libcxx/test/std/numerics/rand/rand.adapt/rand.adapt.disc/
values.pass.cpp
20 // static constexpr size_t block_size = p;
38 static_assert((E::block_size == 223), "");
47 where(E::block_size);
55 static_assert((E::block_size == 389), "");
64 where(E::block_size);
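Note: this libc++ test checks block_size on std::discard_block_engine, the adaptor that keeps only r of every p values from its base engine; the 223 and 389 values are the p parameters of the predefined ranlux24 and ranlux48 engines. A quick illustration using the standard-defined values, checked at compile time:

#include <random>

static_assert(std::ranlux24::block_size == 223,
              "ranlux24 is discard_block_engine<ranlux24_base, 223, 23>");
static_assert(std::ranlux48::block_size == 389,
              "ranlux48 is discard_block_engine<ranlux48_base, 389, 11>");

int main() {
  std::ranlux24 eng(42);
  return eng() == 0;  // draw one value so the example does something at runtime
}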
/external/tensorflow/tensorflow/compiler/tf2xla/lib/
cholesky.h
28 // The algorithm implements a blocked Cholesky decomposition; `block_size` is
35 int64 block_size = 256);
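Note: the tf2xla helper's comment describes a blocked Cholesky: block_size fixes the size of the diagonal panel that is factored directly, after which the panel below it is updated and the trailing submatrix is processed the same way. The standard block recurrence, for a partition with A_{11} of size block_size x block_size (a summary of the general technique, not the XLA implementation):

A = \begin{pmatrix} A_{11} & A_{21}^{T} \\ A_{21} & A_{22} \end{pmatrix},
\qquad
L_{11} = \operatorname{chol}(A_{11}),
\quad
L_{21} = A_{21} L_{11}^{-T},
\quad
\tilde{A}_{22} = A_{22} - L_{21} L_{21}^{T},

and the factorization then recurses on \tilde{A}_{22}.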
/external/tensorflow/tensorflow/core/lib/io/
table_options.h
41 size_t block_size = 262144; member in struct:tensorflow::table::Options
/external/libvpx/libvpx/test/
vp9_block_error_test.cc
35 intptr_t block_size, int64_t *ssz,
43 intptr_t block_size, int64_t *ssz);
47 const tran_low_t *dqcoeff, intptr_t block_size,
50 return fn(coeff, dqcoeff, block_size, ssz);
76 intptr_t block_size; local
84 block_size = 16 << (i % 9); // All block sizes from 4x4, 8x4 ..64x64
85 for (int j = 0; j < block_size; j++) {
99 ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, bit_depth_);
101 ret = error_block_op_(coeff, dqcoeff, block_size, &ssz, bit_depth_));
119 intptr_t block_size; local
46 BlockError8BitWrapper(const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bps) argument
[all...]
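Note: vp9_block_error and its NEON/AVX variants compute two sums over a block of transform coefficients: the squared error between dequantized coefficients and the originals, and (through *ssz) the squared magnitude of the originals; block_size is the number of coefficients, 16 for 4x4 up to 4096 for 64x64. A scalar reference the SIMD versions can be checked against, as the test above does (a sketch using plain int32_t instead of libvpx's tran_low_t):

#include <cstdint>

// Scalar reference: returns sum((coeff - dqcoeff)^2) and sets *ssz = sum(coeff^2).
int64_t block_error_ref(const int32_t *coeff, const int32_t *dqcoeff,
                        intptr_t block_size, int64_t *ssz) {
  int64_t error = 0, sqcoeff = 0;
  for (intptr_t i = 0; i < block_size; ++i) {
    const int64_t diff = static_cast<int64_t>(coeff[i]) - dqcoeff[i];
    error += diff * diff;
    sqcoeff += static_cast<int64_t>(coeff[i]) * coeff[i];
  }
  *ssz = sqcoeff;
  return error;
}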
/external/tensorflow/tensorflow/compiler/tests/
spacetobatch_op_test.py
73 def _testPad(self, inputs, paddings, block_size, outputs):
79 placeholder, paddings, block_size=block_size)
83 placeholder, paddings, block_size=block_size)
86 def _testOne(self, inputs, block_size, outputs):
88 self._testPad(inputs, paddings, block_size, outputs)
93 block_size = 2
95 self._testOne(x_np, block_size, x_out)
101 block_size
[all...]
/external/opencv/cxcore/src/
cxmean.cpp
133 #define ICV_MEAN_ENTRY_BLOCK_COMMON( block_size ) \
134 int remaining = block_size; \
137 #define ICV_MEAN_ENTRY_BLOCK_C1( sumtype, worktype, block_size )\
140 ICV_MEAN_ENTRY_BLOCK_COMMON( block_size )
142 #define ICV_MEAN_ENTRY_BLOCK_C2( sumtype, worktype, block_size )\
145 ICV_MEAN_ENTRY_BLOCK_COMMON( block_size )
147 #define ICV_MEAN_ENTRY_BLOCK_C3( sumtype, worktype, block_size )\
150 ICV_MEAN_ENTRY_BLOCK_COMMON( block_size )
152 #define ICV_MEAN_ENTRY_BLOCK_C4( sumtype, worktype, block_size )\
155 ICV_MEAN_ENTRY_BLOCK_COMMON( block_size )
[all...]
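Note: the cxmean macros process pixels in bounded blocks: partial sums are kept in a narrow worktype and flushed into a wider sumtype every block_size elements, which (for 8-bit input summed into 32-bit partials) keeps the partial sums from overflowing. A hedged sketch of that pattern, my reading of why the block_size parameter exists here rather than a transcription of the OpenCV macros:

#include <cstdint>
#include <vector>

// Sum 8-bit pixels using 32-bit partial sums that are flushed into a 64-bit
// total every block_size elements, so the narrow accumulator cannot overflow.
double blocked_mean(const std::vector<std::uint8_t> &pixels, int block_size) {
  std::int64_t total = 0;
  std::int32_t partial = 0;
  int remaining = block_size;  // mirrors "int remaining = block_size" in the macro
  for (std::uint8_t v : pixels) {
    partial += v;
    if (--remaining == 0) {    // flush the finished block
      total += partial;
      partial = 0;
      remaining = block_size;
    }
  }
  total += partial;            // leftover, shorter than a full block
  return pixels.empty() ? 0.0 : static_cast<double>(total) / pixels.size();
}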
/external/e2fsprogs/ext2ed/
file_com.c
35 file_info.global_block_offset=ptr->i_block [0]*file_system_info.block_size;
37 file_info.blocks_count=(ptr->i_size+file_system_info.block_size-1)/file_system_info.block_size;
45 low_read (file_info.buffer,file_system_info.block_size,file_info.global_block_offset);
86 file_info.global_block_offset=file_info.global_block_num*file_system_info.block_size;
87 file_info.file_offset=file_info.block_num*file_system_info.block_size;
89 low_read (file_info.buffer,file_system_info.block_size,file_info.global_block_offset);
107 if (file_info.offset_in_block+offset < file_system_info.block_size) {
134 if (offset < file_system_info.block_size) {
187 file_info.global_block_offset=file_info.global_block_num*file_system_info.block_size;
[all...]
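Note: file_com.c walks a file by converting between a byte offset and a (block number, offset in block) pair: a block's byte offset is block_num * block_size, blocks_count rounds the file size up to whole blocks, and a move within the current block is legal only while offset_in_block + delta < block_size. A tiny sketch of those conversions (field names shortened; not the ext2ed structs):

#include <cassert>
#include <cstdint>

struct FilePos {  // shortened stand-in for ext2ed's file_info fields
  std::uint64_t block_num;
  std::uint64_t offset_in_block;
};

std::uint64_t blocks_count(std::uint64_t file_size, std::uint64_t block_size) {
  return (file_size + block_size - 1) / block_size;  // round up to whole blocks
}

std::uint64_t to_byte_offset(const FilePos &p, std::uint64_t block_size) {
  return p.block_num * block_size + p.offset_in_block;
}

FilePos to_file_pos(std::uint64_t byte_offset, std::uint64_t block_size) {
  return {byte_offset / block_size, byte_offset % block_size};
}

int main() {
  const std::uint64_t bs = 1024;
  assert(blocks_count(3000, bs) == 3);
  FilePos p = to_file_pos(2100, bs);
  assert(p.block_num == 2 && p.offset_in_block == 52);
  assert(to_byte_offset(p, bs) == 2100);
}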
