// runtime-atomics.cc revision 014dc512cdd3e367bee49a713fdc5ed92584a3e5
1// Copyright 2015 the V8 project authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include "src/runtime/runtime-utils.h" 6 7#include "src/arguments.h" 8#include "src/base/macros.h" 9#include "src/base/platform/mutex.h" 10#include "src/conversions-inl.h" 11#include "src/factory.h" 12 13// Implement Atomic accesses to SharedArrayBuffers as defined in the 14// SharedArrayBuffer draft spec, found here 15// https://github.com/lars-t-hansen/ecmascript_sharedmem 16 17namespace v8 { 18namespace internal { 19 20namespace { 21 22inline bool AtomicIsLockFree(uint32_t size) { 23 return size == 1 || size == 2 || size == 4; 24} 25 26#if V8_CC_GNU 27 28template <typename T> 29inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { 30 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, 31 __ATOMIC_SEQ_CST); 32 return oldval; 33} 34 35template <typename T> 36inline T LoadSeqCst(T* p) { 37 T result; 38 __atomic_load(p, &result, __ATOMIC_SEQ_CST); 39 return result; 40} 41 42template <typename T> 43inline void StoreSeqCst(T* p, T value) { 44 __atomic_store_n(p, value, __ATOMIC_SEQ_CST); 45} 46 47template <typename T> 48inline T AddSeqCst(T* p, T value) { 49 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); 50} 51 52template <typename T> 53inline T SubSeqCst(T* p, T value) { 54 return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST); 55} 56 57template <typename T> 58inline T AndSeqCst(T* p, T value) { 59 return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST); 60} 61 62template <typename T> 63inline T OrSeqCst(T* p, T value) { 64 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); 65} 66 67template <typename T> 68inline T XorSeqCst(T* p, T value) { 69 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); 70} 71 72template <typename T> 73inline T ExchangeSeqCst(T* p, T value) { 74 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); 75} 76 77#elif V8_CC_MSVC 78 
79#define InterlockedCompareExchange32 _InterlockedCompareExchange 80#define InterlockedExchange32 _InterlockedExchange 81#define InterlockedExchangeAdd32 _InterlockedExchangeAdd 82#define InterlockedAnd32 _InterlockedAnd 83#define InterlockedOr32 _InterlockedOr 84#define InterlockedXor32 _InterlockedXor 85#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 86#define InterlockedCompareExchange8 _InterlockedCompareExchange8 87#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 88 89#define ATOMIC_OPS(type, suffix, vctype) \ 90 inline type AddSeqCst(type* p, type value) { \ 91 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ 92 bit_cast<vctype>(value)); \ 93 } \ 94 inline type SubSeqCst(type* p, type value) { \ 95 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ 96 -bit_cast<vctype>(value)); \ 97 } \ 98 inline type AndSeqCst(type* p, type value) { \ 99 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ 100 bit_cast<vctype>(value)); \ 101 } \ 102 inline type OrSeqCst(type* p, type value) { \ 103 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ 104 bit_cast<vctype>(value)); \ 105 } \ 106 inline type XorSeqCst(type* p, type value) { \ 107 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ 108 bit_cast<vctype>(value)); \ 109 } \ 110 inline type ExchangeSeqCst(type* p, type value) { \ 111 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ 112 bit_cast<vctype>(value)); \ 113 } \ 114 \ 115 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ 116 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ 117 bit_cast<vctype>(newval), \ 118 bit_cast<vctype>(oldval)); \ 119 } \ 120 inline type LoadSeqCst(type* p) { return *p; } \ 121 inline void StoreSeqCst(type* p, type value) { \ 122 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ 123 bit_cast<vctype>(value)); \ 124 } 125 126ATOMIC_OPS(int8_t, 8, char) 127ATOMIC_OPS(uint8_t, 
8, char) 128ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ 129ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ 130ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ 131ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ 132 133#undef ATOMIC_OPS_INTEGER 134#undef ATOMIC_OPS 135 136#undef InterlockedCompareExchange32 137#undef InterlockedExchange32 138#undef InterlockedExchangeAdd32 139#undef InterlockedAnd32 140#undef InterlockedOr32 141#undef InterlockedXor32 142#undef InterlockedExchangeAdd16 143#undef InterlockedCompareExchange8 144#undef InterlockedExchangeAdd8 145 146#else 147 148#error Unsupported platform! 149 150#endif 151 152template <typename T> 153T FromObject(Handle<Object> number); 154 155template <> 156inline uint8_t FromObject<uint8_t>(Handle<Object> number) { 157 return NumberToUint32(*number); 158} 159 160template <> 161inline int8_t FromObject<int8_t>(Handle<Object> number) { 162 return NumberToInt32(*number); 163} 164 165template <> 166inline uint16_t FromObject<uint16_t>(Handle<Object> number) { 167 return NumberToUint32(*number); 168} 169 170template <> 171inline int16_t FromObject<int16_t>(Handle<Object> number) { 172 return NumberToInt32(*number); 173} 174 175template <> 176inline uint32_t FromObject<uint32_t>(Handle<Object> number) { 177 return NumberToUint32(*number); 178} 179 180template <> 181inline int32_t FromObject<int32_t>(Handle<Object> number) { 182 return NumberToInt32(*number); 183} 184 185 186inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); } 187 188inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); } 189 190inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); } 191 192inline Object* ToObject(Isolate* isolate, uint16_t t) { 193 return Smi::FromInt(t); 194} 195 196 197inline Object* ToObject(Isolate* isolate, int32_t t) { 198 return *isolate->factory()->NewNumber(t); 199} 200 201 202inline Object* ToObject(Isolate* 
isolate, uint32_t t) { 203 return *isolate->factory()->NewNumber(t); 204} 205 206 207template <typename T> 208inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, 209 Handle<Object> oldobj, Handle<Object> newobj) { 210 T oldval = FromObject<T>(oldobj); 211 T newval = FromObject<T>(newobj); 212 T result = 213 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); 214 return ToObject(isolate, result); 215} 216 217 218template <typename T> 219inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) { 220 T result = LoadSeqCst(static_cast<T*>(buffer) + index); 221 return ToObject(isolate, result); 222} 223 224 225template <typename T> 226inline Object* DoStore(Isolate* isolate, void* buffer, size_t index, 227 Handle<Object> obj) { 228 T value = FromObject<T>(obj); 229 StoreSeqCst(static_cast<T*>(buffer) + index, value); 230 return *obj; 231} 232 233 234template <typename T> 235inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, 236 Handle<Object> obj) { 237 T value = FromObject<T>(obj); 238 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); 239 return ToObject(isolate, result); 240} 241 242 243template <typename T> 244inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, 245 Handle<Object> obj) { 246 T value = FromObject<T>(obj); 247 T result = SubSeqCst(static_cast<T*>(buffer) + index, value); 248 return ToObject(isolate, result); 249} 250 251 252template <typename T> 253inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index, 254 Handle<Object> obj) { 255 T value = FromObject<T>(obj); 256 T result = AndSeqCst(static_cast<T*>(buffer) + index, value); 257 return ToObject(isolate, result); 258} 259 260 261template <typename T> 262inline Object* DoOr(Isolate* isolate, void* buffer, size_t index, 263 Handle<Object> obj) { 264 T value = FromObject<T>(obj); 265 T result = OrSeqCst(static_cast<T*>(buffer) + index, value); 266 return ToObject(isolate, result); 267} 268 269 
template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


// Uint8Clamped functions

// Clamps (rather than wraps) an int32 operand to the [0, 255] range of a
// Uint8Clamped element.
uint8_t ClampToUint8(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return value;
}


inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
                                             size_t index,
                                             Handle<Object> oldobj,
                                             Handle<Object> newobj) {
  // Both operands are clamped before the single hardware CAS is attempted.
  typedef int32_t convert_type;
  uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj));
  uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj));
  uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index,
                                         oldval, newval);
  return ToObject(isolate, result);
}


inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
                                   Handle<Object> obj) {
  typedef int32_t convert_type;
  uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
  StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
  // Like DoStore: returns the unconverted input object.
  return *obj;
}


// Read-modify-write ops on Uint8Clamped elements.  Clamping is not a single
// hardware operation, so each op is a CAS loop: read the element, compute the
// clamped result in int32 space, and retry until no other thread has changed
// the element in between.  The value returned is the one observed before the
// successful update.
#define DO_UINT8_CLAMPED_OP(name, op)                                        \
  inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer,      \
                                        size_t index, Handle<Object> obj) {  \
    typedef int32_t convert_type;                                            \
    uint8_t* p = static_cast<uint8_t*>(buffer) + index;                      \
    convert_type operand = FromObject<convert_type>(obj);                    \
    uint8_t expected;                                                        \
    uint8_t result;                                                          \
    do {                                                                     \
      expected = *p;                                                         \
      result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
    } while (CompareExchangeSeqCst(p, expected, result) != expected);        \
    return ToObject(isolate, expected);                                      \
  }

DO_UINT8_CLAMPED_OP(Add, +)
DO_UINT8_CLAMPED_OP(Sub, -)
DO_UINT8_CLAMPED_OP(And, &)
DO_UINT8_CLAMPED_OP(Or, | )
DO_UINT8_CLAMPED_OP(Xor, ^)

#undef DO_UINT8_CLAMPED_OP


// Exchange uses the same CAS-loop shape as the ops above; the stored value
// (the clamped operand) is fixed, and the loop retries until the observed
// old value is successfully replaced.
inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
                                      size_t index, Handle<Object> obj) {
  typedef int32_t convert_type;
  uint8_t* p = static_cast<uint8_t*>(buffer) + index;
  uint8_t result = ClampToUint8(FromObject<convert_type>(obj));
  uint8_t expected;
  do {
    expected = *p;
  } while (CompareExchangeSeqCst(p, expected, result) != expected);
  return ToObject(isolate, expected);
}


}  // anonymous namespace

// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)

// Each runtime function below follows the same shape:
//   args: (typed array, index, ...operands)
// The typed array's buffer must be shared and the index in bounds (both
// enforced with RUNTIME_ASSERT); |source| points at the array's view into
// the backing store, and the switch dispatches on element type to the
// matching Do* helper.  Float element types fall through to UNREACHABLE().

RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 4);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoCompareExchangeUint8Clamped(isolate, source, index, oldobj,
                                           newobj);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoLoad<ctype>(isolate, source, index);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      // Loading needs no clamping, so the plain uint8_t path is reused.
      return DoLoad<uint8_t>(isolate, source, index);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsStore) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoStore<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoStoreUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoAddUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoSubUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoAndUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoOrUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoXorUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoExchange<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoExchangeUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Args: (size).  Answers whether atomics on elements of |size| bytes are
// lock-free; delegates to the anonymous-namespace AtomicIsLockFree above.
RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
  uint32_t usize = NumberToUint32(*size);
  return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
}
}  // namespace internal
}  // namespace v8