Searched refs:TotalSize (Results 1 - 25 of 77) sorted by relevance

/external/lzma/CPP/7zip/Compress/
CopyCoder.cpp
32 TotalSize = 0;
36 if (outSize && size > *outSize - TotalSize)
37 size = (UInt32)(*outSize - TotalSize);
45 TotalSize += size;
48 RINOK(progress->SetRatioInfo(&TotalSize, &TotalSize));
56 *value = TotalSize;
71 return copyCoderSpec->TotalSize == size ? S_OK : E_FAIL;
CopyCoder.h
19 UInt64 TotalSize; member in class:NCompress::CCopyCoder
20 CCopyCoder(): TotalSize(0), _buffer(0) {};
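The CopyCoder hits above show a clamp-and-accumulate pattern: each chunk is clamped to the bytes still allowed by *outSize, then added to TotalSize, which also feeds the progress callback and the final size query. A minimal standalone sketch of that pattern (hypothetical CopyState/CopyChunk names, not the actual NCompress::CCopyCoder interface):

    #include <cstdint>

    struct CopyState {
      uint64_t TotalSize = 0;   // bytes copied so far, as in CCopyCoder::TotalSize
    };

    // Clamp a chunk to the remaining output budget, then account for it.
    uint32_t CopyChunk(CopyState &s, uint32_t size, const uint64_t *outSize) {
      if (outSize && size > *outSize - s.TotalSize)
        size = static_cast<uint32_t>(*outSize - s.TotalSize);
      s.TotalSize += size;      // mirrors "TotalSize += size;" in the hit above
      return size;
    }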
/external/llvm/lib/DebugInfo/CodeView/
MemoryTypeTableBuilder.cpp
28 int TotalSize = alignTo(Data.size() + SizeOfRecLen, Align); local
29 assert(TotalSize - SizeOfRecLen <= UINT16_MAX);
31 reinterpret_cast<char *>(RecordStorage.Allocate(TotalSize, Align));
32 *reinterpret_cast<ulittle16_t *>(Mem) = uint16_t(TotalSize - SizeOfRecLen);
34 for (int I = Data.size() + SizeOfRecLen; I < TotalSize; ++I)
35 Mem[I] = LF_PAD0 + (TotalSize - I);
43 Records.push_back(StringRef(Mem, TotalSize));
/external/eigen/unsupported/test/
cxx11_tensor_dimension.cpp
24 VERIFY_IS_EQUAL((int)dimensions.TotalSize(), 2*3*7);
37 VERIFY_IS_EQUAL((int)dimensions.TotalSize(), 2*3*7);
54 VERIFY_IS_EQUAL((int)scalar.TotalSize(), 1);
59 VERIFY_IS_EQUAL((int)dscalar.TotalSize(), 1);
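In these Eigen tensor tests, dimensions().TotalSize() is the product of all dimension extents, i.e. the number of coefficients in the tensor (1 for a scalar). A minimal sketch, assuming the unsupported CXX11 Tensor module is on the include path:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <cassert>

    int main() {
      Eigen::Tensor<float, 3> t(2, 3, 7);
      // TotalSize() multiplies the extents: 2 * 3 * 7 coefficients.
      assert(t.dimensions().TotalSize() == 2 * 3 * 7);
      return 0;
    }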
cxx11_tensor_reduction_sycl.cpp
39 float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
45 sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
74 float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
75 float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
80 sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
82 sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
112 float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
113 float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
118 sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
120 sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeo
[all...]
cxx11_tensor_forced_eval_sycl.cpp
35 float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
36 float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
37 float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
46 sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(float));
47 sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(float));
50 sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
cxx11_tensor_sycl.cpp
44 float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
45 float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
46 float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.dimensions().TotalSize()*sizeof(float)));
47 float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
56 sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.dimensions().TotalSize())*sizeof(float));
68 sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.dimensions().TotalSize())*sizeof(float));
80 sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(float));
82 sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
96 sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
110 sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeo
[all...]
cxx11_tensor_broadcast_sycl.cpp
47 float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
48 float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
52 sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float));
54 sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
cxx11_tensor_morphing.cpp
236 VERIFY_IS_EQUAL(slice1.dimensions().TotalSize(), 1);
242 VERIFY_IS_EQUAL(slice2.dimensions().TotalSize(), 2);
248 VERIFY_IS_EQUAL(slice2.dimensions().TotalSize(), 2);
255 VERIFY_IS_EQUAL(slice3.dimensions().TotalSize(), 2);
262 VERIFY_IS_EQUAL(slice4.dimensions().TotalSize(), 6);
272 VERIFY_IS_EQUAL(slice4.dimensions().TotalSize(), 22);
284 VERIFY_IS_EQUAL(slice5.dimensions().TotalSize(), 210);
299 VERIFY_IS_EQUAL(slice5.dimensions().TotalSize(), 770);
316 VERIFY_IS_EQUAL(slice6.dimensions().TotalSize(), 3*5*7*11);
452 VERIFY_IS_EQUAL(tensor.dimensions().TotalSize(), 1
[all...]
cxx11_tensor_of_complex.cpp
91 for (int i = 0; i < t_result.dimensions().TotalSize(); i++) {
/external/llvm/lib/Support/
CachePruning.cpp
89 uint64_t TotalSize = 0; local
96 TotalSize += FileStatus.getSize();
141 auto AvailableSpace = TotalSize + SpaceInfo.free;
143 DEBUG(dbgs() << "Occupancy: " << ((100 * TotalSize) / AvailableSpace)
146 while (((100 * TotalSize) / AvailableSpace) > PercentageOfAvailableSpace &&
151 TotalSize -= FileAndSize->first;
153 << FileAndSize->first << "), new occupancy is " << TotalSize
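The CachePruning.cpp hits sum the cache files into TotalSize and compute occupancy as a percentage of TotalSize plus the filesystem's free space, evicting files until it drops below the threshold. The arithmetic alone, with made-up example values rather than the LLVM API:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Example values; the real code sums file sizes and queries disk_space().
      uint64_t TotalSize = 750;                  // bytes used by cache files
      uint64_t Free = 250;                       // free space on the same filesystem
      uint64_t AvailableSpace = TotalSize + Free;
      uint64_t Occupancy = (100 * TotalSize) / AvailableSpace;   // 75 here
      std::printf("Occupancy: %llu%%\n", (unsigned long long)Occupancy);
      return 0;
    }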
/external/llvm/lib/Target/Hexagon/
HexagonStoreWidening.cpp
95 InstrGroup &OG, unsigned &TotalSize, unsigned MaxSize);
96 bool createWideStores(InstrGroup &OG, InstrGroup &NG, unsigned TotalSize);
309 InstrGroup::iterator End, InstrGroup &OG, unsigned &TotalSize,
389 TotalSize = Pow2Size;
398 unsigned TotalSize) {
401 // - only handle a TotalSize of up to 4.
403 if (TotalSize > 4)
428 TotalSize, OldM.getAlignment(),
433 unsigned WOpc = (TotalSize == 2) ? Hexagon::S4_storeirh_io :
434 (TotalSize
308 selectStores(InstrGroup::iterator Begin, InstrGroup::iterator End, InstrGroup &OG, unsigned &TotalSize, unsigned MaxSize) argument
397 createWideStores(InstrGroup &OG, InstrGroup &NG, unsigned TotalSize) argument
[all...]
/external/llvm/lib/Target/ARM/MCTargetDesc/
ARMUnwindOpAsm.cpp
162 size_t TotalSize = Ops.size() + 1;
163 size_t RoundUpSize = (TotalSize + 3) / 4 * 4;
178 size_t TotalSize = Ops.size() + 2;
179 size_t RoundUpSize = (TotalSize + 3) / 4 * 4;
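Both ARMUnwindOpAsm.cpp hits round TotalSize up to a multiple of 4, since unwind opcodes are emitted as whole 32-bit words. The same expression as a standalone helper (hypothetical RoundUpToWord name):

    #include <cstddef>
    #include <cassert>

    // Pad a byte count to a whole number of 32-bit words, as in the hits above.
    size_t RoundUpToWord(size_t TotalSize) { return (TotalSize + 3) / 4 * 4; }

    int main() {
      assert(RoundUpToWord(5) == 8);
      assert(RoundUpToWord(8) == 8);
      return 0;
    }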
/external/llvm/lib/Transforms/Scalar/
LoadCombine.cpp
185 unsigned TotalSize = 0; local
187 TotalSize += L.Load->getType()->getPrimitiveSizeInBits();
188 while (TotalSize != 0 && !isPowerOf2_32(TotalSize))
189 TotalSize -= Loads.pop_back_val().Load->getType()->getPrimitiveSizeInBits();
217 Ptr, PointerType::get(IntegerType::get(Ptr->getContext(), TotalSize),
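LoadCombine.cpp sums the loads' bit widths into TotalSize, then pops trailing loads until the total is zero or a power of two, so the combined load has an integer width it can emit. A sketch of that trimming loop over plain integers (hypothetical TrimToPowerOf2 helper, not the LLVM pass itself):

    #include <vector>
    #include <cassert>

    bool IsPowerOf2(unsigned X) { return X && (X & (X - 1)) == 0; }

    // Drop trailing widths until the combined width is 0 or a power of two.
    unsigned TrimToPowerOf2(std::vector<unsigned> &Sizes) {
      unsigned TotalSize = 0;
      for (unsigned S : Sizes) TotalSize += S;
      while (TotalSize != 0 && !IsPowerOf2(TotalSize)) {
        TotalSize -= Sizes.back();
        Sizes.pop_back();
      }
      return TotalSize;
    }

    int main() {
      std::vector<unsigned> Sizes = {32, 32, 16};   // 80 bits -> trimmed to 64
      assert(TrimToPowerOf2(Sizes) == 64);
      return 0;
    }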
/external/compiler-rt/lib/profile/
InstrProfData.inc
300 uint32_t TotalSize;
354 uint32_t getSize() const { return TotalSize; }
496 uint32_t TotalSize = sizeof(ValueProfData);
503 TotalSize += getValueProfRecordSize(NumValueSites,
506 return TotalSize;
535 * DstData is not null, the caller is expected to set the TotalSize
542 uint32_t TotalSize =
543 DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
546 DstData ? DstData : Closure->AllocValueProfData(TotalSize);
548 VPD->TotalSize
[all...]
InstrProfilingMerge.c
130 SrcValueProfData->TotalSize);
/external/llvm/include/llvm/ProfileData/
InstrProfData.inc
300 uint32_t TotalSize;
354 uint32_t getSize() const { return TotalSize; }
496 uint32_t TotalSize = sizeof(ValueProfData);
503 TotalSize += getValueProfRecordSize(NumValueSites,
506 return TotalSize;
535 * DstData is not null, the caller is expected to set the TotalSize
542 uint32_t TotalSize =
543 DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
546 DstData ? DstData : Closure->AllocValueProfData(TotalSize);
548 VPD->TotalSize
[all...]
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorStorage.h
64 EIGEN_STRONG_INLINE DenseIndex size() const { return m_dimensions.TotalSize(); }
137 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); }
TensorInflation.h
144 eigen_assert(index < dimensions().TotalSize());
193 eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
207 const double input_size = m_impl.dimensions().TotalSize();
208 const double output_size = m_dimensions.TotalSize();
TensorReverse.h
155 eigen_assert(index < dimensions().TotalSize());
199 eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
273 eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
TensorConvolution.h
393 for (int i = 0; i < dimensions().TotalSize(); ++i) {
453 const double kernel_size = m_kernelImpl.dimensions().TotalSize();
523 size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar);
804 m_buf = (Scalar*)m_device.allocate(dimensions().TotalSize() * sizeof(Scalar));
831 size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar);
862 const int kernel_size = m_kernelImpl.dimensions().TotalSize();
865 const int numP = dimensions().TotalSize() / numX;
933 const int numP = dimensions().TotalSize() / (numX*numY);
1016 const int numP = dimensions().TotalSize() / (numX*numY*numZ);
1053 eigen_assert(index < m_dimensions.TotalSize());
[all...]
TensorConcatenation.h
253 eigen_assert(index + packetSize - 1 < dimensions().TotalSize());
269 const double lhs_size = m_leftImpl.dimensions().TotalSize();
270 const double rhs_size = m_rightImpl.dimensions().TotalSize();
349 eigen_assert(index + packetSize - 1 < this->dimensions().TotalSize());
/external/lzma/CPP/7zip/UI/Common/
DirItem.h
59 UInt64 TotalSize; member in class:CDirItems
/external/llvm/lib/ProfileData/
InstrProf.cpp
592 static std::unique_ptr<ValueProfData> allocValueProfData(uint32_t TotalSize) { argument
593 return std::unique_ptr<ValueProfData>(new (::operator new(TotalSize))
601 if (TotalSize % sizeof(uint64_t))
609 if ((char *)VR - (char *)this > (ptrdiff_t)TotalSize)
624 uint32_t TotalSize = swapToHostOrder<uint32_t>(Header, Endianness); local
625 if (D + TotalSize > BufferEnd)
628 std::unique_ptr<ValueProfData> VPD = allocValueProfData(TotalSize);
629 memcpy(VPD.get(), D, TotalSize);
645 sys::swapByteOrder<uint32_t>(TotalSize);
666 sys::swapByteOrder<uint32_t>(TotalSize);
[all...]
/external/v8/src/compiler/
loop-analysis.h
44 size_t TotalSize() const { return exits_end_ - header_start_; } function in class:v8::internal::compiler::LoopTree::Loop

Completed in 534 milliseconds
