/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "lambda/closure.h"

#include "base/logging.h"
#include "lambda/art_lambda_method.h"
#include "runtime/mirror/object_reference.h"

static constexpr const bool kClosureSupportsReferences = false;
static constexpr const bool kClosureSupportsGarbageCollection = false;

namespace art {
namespace lambda {

template <typename T>
// TODO: can I return T __attribute__((__aligned__(1)))* here instead?
const uint8_t* Closure::GetUnsafeAtOffset(size_t offset) const {
  // Do not DCHECK here with existing helpers since most of them will call into this function.
  return reinterpret_cast<const uint8_t*>(captured_) + offset;
}

size_t Closure::GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const {
  switch (variable_type) {
    case ShortyFieldType::kLambda:
    {
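      // Lambdas are dynamically sized, so read the nested closure's actual size via
      // GetClosureSize.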
      return GetClosureSize(GetUnsafeAtOffset<Closure>(offset));
    }
    default:
      DCHECK(variable_type.IsStaticSize());
      return variable_type.GetStaticSize();
  }
}

// Templatize the flags to give the compiler a fighting chance to eliminate
// any unnecessary code through different uses of this function.
template <Closure::VariableInfo::Flags flags>
inline Closure::VariableInfo Closure::ParseTypeDescriptor(const char* type_descriptor,
                                                          size_t upto_index) const {
  DCHECK(type_descriptor != nullptr);

  VariableInfo result;

  ShortyFieldType last_type;
  size_t offset = (flags & VariableInfo::kOffset) ? GetStartingOffset() : 0;
  size_t prev_offset = 0;
  size_t count = 0;
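
  // Walk the descriptor one shorty type at a time. After this loop, 'last_type' is the type at
  // 'upto_index' (or the final type if the descriptor is shorter), 'count' is the number of types
  // visited, and, when 'flags' requests it, 'offset' is that variable's byte offset within the
  // captured storage.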
  while ((type_descriptor =
      ShortyFieldType::ParseFromFieldTypeDescriptor(type_descriptor, &last_type)) != nullptr) {
    count++;

    if (flags & VariableInfo::kOffset) {
      // 'offset' accumulates the sizes of all *preceding* captured variables only; the current
      // variable's size is added on the following iteration.
      offset += prev_offset;
      prev_offset = GetCapturedVariableSize(last_type, offset);
    }

    if (count > upto_index) {
      break;
    }
  }

  if (flags & VariableInfo::kVariableType) {
    result.variable_type_ = last_type;
  }

  if (flags & VariableInfo::kIndex) {
    result.index_ = count;
  }

  if (flags & VariableInfo::kCount) {
    result.count_ = count;
  }

  if (flags & VariableInfo::kOffset) {
    result.offset_ = offset;
  }

  // TODO: We should probably store the result of this in the ArtLambdaMethod,
  // to avoid re-computing the data every single time for static closures.
  return result;
}

size_t Closure::GetCapturedVariablesSize() const {
  const size_t captured_variable_offset = offsetof(Closure, captured_);
  DCHECK_GE(GetSize(), captured_variable_offset);  // Prevent underflows.
  return GetSize() - captured_variable_offset;
}

size_t Closure::GetSize() const {
  const size_t static_closure_size = lambda_info_->GetStaticClosureSize();
  if (LIKELY(lambda_info_->IsStaticSize())) {
    return static_closure_size;
  }
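
  // The closure is dynamically sized, so the total size is stored inline as the first captured
  // field (see GetClosureSize for the same read done through a raw pointer).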
  DCHECK_GE(static_closure_size, sizeof(captured_[0].dynamic_.size_));
  const size_t dynamic_closure_size = captured_[0].dynamic_.size_;
  // The dynamic size must be at least as large as the static size.
  DCHECK_GE(dynamic_closure_size, static_closure_size);

  return dynamic_closure_size;
}

void Closure::CopyTo(void* target, size_t target_size) const {
  DCHECK_GE(target_size, GetSize());

  // TODO: using memcpy is unsafe with read barriers; fix this once we add reference support.
  static_assert(kClosureSupportsReferences == false,
                "Do not use memcpy with read barrier references");
  memcpy(target, this, GetSize());
}

ArtMethod* Closure::GetTargetMethod() const {
  return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
}

uint32_t Closure::GetHashCode() const {
  // Start with a non-zero constant, a prime number.
  uint32_t result = 17;
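
  // Mix in each value with the classic 'hash = 31 * hash + value' scheme (as used by
  // java.lang.String.hashCode()).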

  // Include the hash of the ArtMethod pointer.
  {
    uintptr_t method = reinterpret_cast<uintptr_t>(GetTargetMethod());
    result = 31 * result + Low32Bits(method);
    if (sizeof(method) == sizeof(uint64_t)) {
      result = 31 * result + High32Bits(method);
    }
  }

  // Include a hash for each captured variable.
  for (size_t i = 0; i < GetCapturedVariablesSize(); ++i) {
    // TODO: not safe for GC-able values since the address can move and the hash code would change.
    uint8_t captured_variable_raw_value;
    CopyUnsafeAtOffset<uint8_t>(i, /*out*/&captured_variable_raw_value);  // NOLINT: [whitespace/comma] [3]

    result = 31 * result + captured_variable_raw_value;
  }

  // TODO: Fix above loop to work for objects and lambdas.
  static_assert(kClosureSupportsGarbageCollection == false,
                "Need to update above loop to read the hash code from the "
                "objects and lambdas recursively");

  return result;
}

bool Closure::ReferenceEquals(const Closure* other) const {
  DCHECK(other != nullptr);

  // TODO: Need rework to use read barriers once closures have references inside of them that can
  // move. Until then, it's safe to just compare the data inside of it directly.
  static_assert(kClosureSupportsReferences == false,
                "Unsafe to use memcmp in read barrier collector");

  if (GetSize() != other->GetSize()) {
    return false;
  }

  return memcmp(this, other, GetSize()) == 0;
}

size_t Closure::GetNumberOfCapturedVariables() const {
  // TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
  VariableInfo variable_info =
      ParseTypeDescriptor<VariableInfo::kCount>(GetCapturedVariablesTypeDescriptor(),
                                                VariableInfo::kUpToIndexMax);
  size_t count = variable_info.count_;
  // Since each variable occupies at least 1 byte, the size must be greater than or equal to
  // the count.
  DCHECK_LE(count, GetCapturedVariablesSize());
  return count;
}

const char* Closure::GetCapturedVariablesTypeDescriptor() const {
  return lambda_info_->GetCapturedVariablesTypeDescriptor();
}

ShortyFieldType Closure::GetCapturedShortyType(size_t index) const {
  DCHECK_LT(index, GetNumberOfCapturedVariables());

  VariableInfo variable_info =
      ParseTypeDescriptor<VariableInfo::kVariableType>(GetCapturedVariablesTypeDescriptor(),
                                                       index);

  return variable_info.variable_type_;
}

uint32_t Closure::GetCapturedPrimitiveNarrow(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsPrimitiveNarrow());

  ShortyFieldType variable_type;
  size_t offset;
  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);

  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
  // so that we can avoid this nonsense regarding memcpy always overflowing.
  // Plus, this additional switching seems redundant since the interpreter
  // would've done it already, and knows the exact type.
  uint32_t result = 0;
  static_assert(ShortyFieldTypeTraits::IsPrimitiveNarrowType<decltype(result)>(),
                "result must be a primitive narrow type");
  switch (variable_type) {
    case ShortyFieldType::kBoolean:
      CopyUnsafeAtOffset<bool>(offset, &result);
      break;
    case ShortyFieldType::kByte:
      CopyUnsafeAtOffset<uint8_t>(offset, &result);
      break;
    case ShortyFieldType::kChar:
      CopyUnsafeAtOffset<uint16_t>(offset, &result);
      break;
    case ShortyFieldType::kShort:
      CopyUnsafeAtOffset<int16_t>(offset, &result);
      break;
    case ShortyFieldType::kInt:
      CopyUnsafeAtOffset<int32_t>(offset, &result);
      break;
    case ShortyFieldType::kFloat:
      // XX: Maybe there should just be a GetCapturedPrimitive<T> to avoid this shuffle?
      // The interpreter's invoke seems to only special case references and wides,
      // everything else is treated as a generic 32-bit pattern.
      CopyUnsafeAtOffset<float>(offset, &result);
      break;
    default:
      LOG(FATAL)
          << "expected a valid narrow primitive shorty type but got "
          << static_cast<char>(variable_type);
      UNREACHABLE();
  }

  return result;
}

uint64_t Closure::GetCapturedPrimitiveWide(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsPrimitiveWide());

  ShortyFieldType variable_type;
  size_t offset;
  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);

  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
  // so that we can avoid this nonsense regarding memcpy always overflowing.
  // Plus, this additional switching seems redundant since the interpreter
  // would've done it already, and knows the exact type.
  uint64_t result = 0;
  static_assert(ShortyFieldTypeTraits::IsPrimitiveWideType<decltype(result)>(),
                "result must be a primitive wide type");
  switch (variable_type) {
    case ShortyFieldType::kLong:
      CopyUnsafeAtOffset<int64_t>(offset, &result);
      break;
    case ShortyFieldType::kDouble:
      CopyUnsafeAtOffset<double>(offset, &result);
      break;
    default:
      LOG(FATAL)
          << "expected a valid primitive wide shorty type but got "
          << static_cast<char>(variable_type);
      UNREACHABLE();
  }

  return result;
}

mirror::Object* Closure::GetCapturedObject(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsObject());

  ShortyFieldType variable_type;
  size_t offset;
  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);

  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
  // so that we can avoid this nonsense regarding memcpy always overflowing.
  // Plus, this additional switching seems redundant since the interpreter
  // would've done it already, and knows the exact type.
  mirror::Object* result = nullptr;
  static_assert(ShortyFieldTypeTraits::IsObjectType<decltype(result)>(),
                "result must be an object type");
  switch (variable_type) {
    case ShortyFieldType::kObject:
      // TODO: This seems unsafe. This may need to use gcroots.
      static_assert(kClosureSupportsGarbageCollection == false,
                    "May need GcRoots and definitely need mutator locks");
      {
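        // Heap references are stored as 32-bit compressed references; decode back to a raw
        // mirror::Object*.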
        mirror::CompressedReference<mirror::Object> compressed_result;
        CopyUnsafeAtOffset<uint32_t>(offset, &compressed_result);
        result = compressed_result.AsMirrorPtr();
      }
      break;
    default:
      LOG(FATAL)
          << "expected a valid object shorty type but got " << static_cast<char>(variable_type);
      UNREACHABLE();
  }

  return result;
}

size_t Closure::GetCapturedClosureSize(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsLambda());
  size_t offset = GetCapturedVariableOffset(index);

  auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
  size_t closure_size = GetClosureSize(captured_ptr + offset);

  return closure_size;
}

void Closure::CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const {
  DCHECK(GetCapturedShortyType(index).IsLambda());
  size_t offset = GetCapturedVariableOffset(index);

  auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
  size_t closure_size = GetClosureSize(captured_ptr + offset);

  static_assert(ShortyFieldTypeTraits::IsLambdaType<Closure*>(),
                "result must be a lambda type");

  CopyUnsafeAtOffset<Closure>(offset, destination, closure_size, destination_room);
}

size_t Closure::GetCapturedVariableOffset(size_t index) const {
  VariableInfo variable_info =
      ParseTypeDescriptor<VariableInfo::kOffset>(GetCapturedVariablesTypeDescriptor(),
                                                 index);

  size_t offset = variable_info.offset_;

  return offset;
}

void Closure::GetCapturedVariableTypeAndOffset(size_t index,
                                               ShortyFieldType* out_type,
                                               size_t* out_offset) const {
  DCHECK(out_type != nullptr);
  DCHECK(out_offset != nullptr);

  static constexpr const VariableInfo::Flags kVariableTypeAndOffset =
      static_cast<VariableInfo::Flags>(VariableInfo::kVariableType | VariableInfo::kOffset);
  VariableInfo variable_info =
      ParseTypeDescriptor<kVariableTypeAndOffset>(GetCapturedVariablesTypeDescriptor(),
                                                  index);

  ShortyFieldType variable_type = variable_info.variable_type_;
  size_t offset = variable_info.offset_;

  *out_type = variable_type;
  *out_offset = offset;
}

template <typename T>
void Closure::CopyUnsafeAtOffset(size_t offset,
                                 void* destination,
                                 size_t src_size,
                                 size_t destination_room) const {
  DCHECK_GE(destination_room, src_size);
  const uint8_t* data_ptr = GetUnsafeAtOffset<T>(offset);
  // Copy 'src_size' bytes rather than sizeof(T): for dynamically-sized types (e.g. nested
  // closures) the source can be larger than the static type suggests.
  memcpy(destination, data_ptr, src_size);
}

// TODO: This is kind of ugly. I would prefer an unaligned_ptr<Closure> here.
// Unfortunately C++ doesn't let you lower the alignment; i.e. 'alignas(1) Closure*' is not legal.
size_t Closure::GetClosureSize(const uint8_t* closure) {
  DCHECK(closure != nullptr);

  static_assert(!std::is_base_of<mirror::Object, Closure>::value,
                "It might be unsafe to call memcpy on a managed object");

  // Safe as long as it's not a mirror Object.
  // TODO: Should probably wrap this in something like MemCpyNative which statically asserts
  // we aren't trying to copy mirror::Object data around.
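  // Read through memcpy because 'closure' may not satisfy Closure's alignment requirements (see
  // the unaligned-pointer TODO above).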
  ArtLambdaMethod* closure_info;
  memcpy(&closure_info, closure + offsetof(Closure, lambda_info_), sizeof(closure_info));

  if (LIKELY(closure_info->IsStaticSize())) {
    return closure_info->GetStaticClosureSize();
  }

  // The size is dynamic, so we need to read it from the captured_variables_ portion.
  size_t dynamic_size;
  memcpy(&dynamic_size,
         closure + offsetof(Closure, captured_[0].dynamic_.size_),
         sizeof(dynamic_size));
  static_assert(sizeof(dynamic_size) == sizeof(captured_[0].dynamic_.size_),
                "Dynamic size type must match the structural type of the size");

  DCHECK_GE(dynamic_size, closure_info->GetStaticClosureSize());
  return dynamic_size;
}

size_t Closure::GetStartingOffset() const {
  static constexpr const size_t captured_offset = offsetof(Closure, captured_);
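  // Dynamically-sized closures lay out their size field before the captured variables, so the
  // first variable begins at a larger offset than in the statically-sized layout.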
  if (LIKELY(lambda_info_->IsStaticSize())) {
    return offsetof(Closure, captured_[0].static_variables_) - captured_offset;
  } else {
    return offsetof(Closure, captured_[0].dynamic_.variables_) - captured_offset;
  }
}

}  // namespace lambda
}  // namespace art