Lines Matching refs:__

17 #define __ masm.
51 __ Move(input, a0, a1);
53 __ Push(temp3, temp2, temp1);
57 __ Pop(temp3, temp2, temp1);
61 __ Move(v0, v1, result);
63 __ Ret();
127 __ slti(a6, a2, 2 * loadstore_chunk);
128 __ bne(a6, zero_reg, &lastb);
129 __ mov(v0, a0); // In delay slot.
135 __ xor_(t8, a1, a0);
136 __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
137 __ bne(t8, zero_reg, &unaligned);
138 __ subu(a3, zero_reg, a0); // In delay slot.
140 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
141 __ beq(a3, zero_reg, &aligned); // Already aligned.
142 __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
144 __ lwr(t8, MemOperand(a1));
145 __ addu(a1, a1, a3);
146 __ swr(t8, MemOperand(a0));
147 __ addu(a0, a0, a3);
153 __ bind(&aligned);
154 __ andi(t8, a2, 0x3f);
155 __ beq(a2, t8, &chkw); // Less than 64?
156 __ subu(a3, a2, t8); // In delay slot.
157 __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
165 __ addu(a4, a0, a2); // a4 is the "past the end" address.
166 __ Subu(t9, a4, pref_limit); // t9 is the "last safe pref" address.
169 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
170 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
171 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
172 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
175 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
176 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
177 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
179 __ bind(&loop16w);
180 __ lw(a4, MemOperand(a1));
183 __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
184 __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
186 __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
188 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
189 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
191 __ bind(&skip_pref);
192 __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
193 __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
194 __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
195 __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
196 __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
197 __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
198 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
200 __ sw(a4, MemOperand(a0));
201 __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
202 __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
203 __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
204 __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
205 __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
206 __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
207 __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
209 __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
210 __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
211 __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
212 __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
213 __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
214 __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
215 __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
216 __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
217 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
219 __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
220 __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
221 __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
222 __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
223 __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
224 __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
225 __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
226 __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
227 __ addiu(a0, a0, 16 * loadstore_chunk);
228 __ bne(a0, a3, &loop16w);
229 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
230 __ mov(a2, t8);
235 __ bind(&chkw);
236 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
237 __ andi(t8, a2, 0x1f);
238 __ beq(a2, t8, &chk1w); // Less than 32?
239 __ nop(); // In delay slot.
240 __ lw(a4, MemOperand(a1));
241 __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
242 __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
243 __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
244 __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
245 __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
246 __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
247 __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
248 __ addiu(a1, a1, 8 * loadstore_chunk);
249 __ sw(a4, MemOperand(a0));
250 __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
251 __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
252 __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
253 __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
254 __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
255 __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
256 __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
257 __ addiu(a0, a0, 8 * loadstore_chunk);
264 __ bind(&chk1w);
265 __ andi(a2, t8, loadstore_chunk - 1);
266 __ beq(a2, t8, &lastb);
267 __ subu(a3, t8, a2); // In delay slot.
268 __ addu(a3, a0, a3);
270 __ bind(&wordCopy_loop);
271 __ lw(a7, MemOperand(a1));
272 __ addiu(a0, a0, loadstore_chunk);
273 __ addiu(a1, a1, loadstore_chunk);
274 __ bne(a0, a3, &wordCopy_loop);
275 __ sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
277 __ bind(&lastb);
278 __ Branch(&leave, le, a2, Operand(zero_reg));
279 __ addu(a3, a0, a2);
281 __ bind(&lastbloop);
282 __ lb(v1, MemOperand(a1));
283 __ addiu(a0, a0, 1);
284 __ addiu(a1, a1, 1);
285 __ bne(a0, a3, &lastbloop);
286 __ sb(v1, MemOperand(a0, -1)); // In delay slot.
288 __ bind(&leave);
289 __ jr(ra);
290 __ nop();
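
The aligned path above reduces to a chunked word copy: 16-word blocks (loop16w), an 8-word block (chkw), a per-word loop (wordCopy_loop), and a trailing byte loop (lastb). A minimal C++ sketch of that shape, with the prefetch hints and the 8-word stage omitted, purely as an illustration of the control flow rather than the stub itself:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void MemCopyAlignedSketch(uint8_t* dst, const uint8_t* src, size_t count) {
      while (count >= 16 * sizeof(uint32_t)) {         // loop16w
        std::memcpy(dst, src, 16 * sizeof(uint32_t));  // 16 lw / 16 sw pairs
        dst += 16 * sizeof(uint32_t);
        src += 16 * sizeof(uint32_t);
        count -= 16 * sizeof(uint32_t);
      }
      while (count >= sizeof(uint32_t)) {              // wordCopy_loop
        std::memcpy(dst, src, sizeof(uint32_t));       // one lw / sw per word
        dst += sizeof(uint32_t);
        src += sizeof(uint32_t);
        count -= sizeof(uint32_t);
      }
      while (count-- > 0) *dst++ = *src++;             // lastb
    }
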
295 __ bind(&unaligned);
296 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
297 __ beq(a3, zero_reg, &ua_chk16w);
298 __ subu(a2, a2, a3); // In delay slot.
300 __ lwr(v1, MemOperand(a1));
301 __ lwl(v1,
303 __ addu(a1, a1, a3);
304 __ swr(v1, MemOperand(a0));
305 __ addu(a0, a0, a3);
311 __ bind(&ua_chk16w);
312 __ andi(t8, a2, 0x3f);
313 __ beq(a2, t8, &ua_chkw);
314 __ subu(a3, a2, t8); // In delay slot.
315 __ addu(a3, a0, a3);
318 __ addu(a4, a0, a2);
319 __ Subu(t9, a4, pref_limit);
322 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
323 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
324 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
327 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
328 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
329 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
332 __ bind(&ua_loop16w);
333 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
334 __ lwr(a4, MemOperand(a1));
335 __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
336 __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
339 __ sltu(v1, t9, a0);
340 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
342 __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
344 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
345 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
347 __ bind(&ua_skip_pref);
348 __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
349 __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
350 __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
351 __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
352 __ lwl(a4,
354 __ lwl(a5,
356 __ lwl(a6,
358 __ lwl(a7,
360 __ lwl(t0,
362 __ lwl(t1,
364 __ lwl(t2,
366 __ lwl(t3,
368 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
369 __ sw(a4, MemOperand(a0));
370 __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
371 __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
372 __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
373 __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
374 __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
375 __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
376 __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
377 __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
378 __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
379 __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
380 __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
381 __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
382 __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
383 __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
384 __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
385 __ lwl(a4,
387 __ lwl(a5,
389 __ lwl(a6,
391 __ lwl(a7,
393 __ lwl(t0,
395 __ lwl(t1,
397 __ lwl(t2,
399 __ lwl(t3,
401 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
402 __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
403 __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
404 __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
405 __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
406 __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
407 __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
408 __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
409 __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
410 __ addiu(a0, a0, 16 * loadstore_chunk);
411 __ bne(a0, a3, &ua_loop16w);
412 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
413 __ mov(a2, t8);
418 __ bind(&ua_chkw);
419 __ Pref(pref_hint_load, MemOperand(a1));
420 __ andi(t8, a2, 0x1f);
422 __ beq(a2, t8, &ua_chk1w);
423 __ nop(); // In delay slot.
424 __ lwr(a4, MemOperand(a1));
425 __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
426 __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
427 __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
428 __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
429 __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
430 __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
431 __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
432 __ lwl(a4,
434 __ lwl(a5,
436 __ lwl(a6,
438 __ lwl(a7,
440 __ lwl(t0,
442 __ lwl(t1,
444 __ lwl(t2,
446 __ lwl(t3,
448 __ addiu(a1, a1, 8 * loadstore_chunk);
449 __ sw(a4, MemOperand(a0));
450 __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
451 __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
452 __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
453 __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
454 __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
455 __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
456 __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
457 __ addiu(a0, a0, 8 * loadstore_chunk);
461 __ bind(&ua_chk1w);
462 __ andi(a2, t8, loadstore_chunk - 1);
463 __ beq(a2, t8, &ua_smallCopy);
464 __ subu(a3, t8, a2); // In delay slot.
465 __ addu(a3, a0, a3);
467 __ bind(&ua_wordCopy_loop);
468 __ lwr(v1, MemOperand(a1));
469 __ lwl(v1,
471 __ addiu(a0, a0, loadstore_chunk);
472 __ addiu(a1, a1, loadstore_chunk);
473 __ bne(a0, a3, &ua_wordCopy_loop);
474 __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
477 __ bind(&ua_smallCopy);
478 __ beq(a2, zero_reg, &leave);
479 __ addu(a3, a0, a2); // In delay slot.
481 __ bind(&ua_smallCopy_loop);
482 __ lb(v1, MemOperand(a1));
483 __ addiu(a0, a0, 1);
484 __ addiu(a1, a1, 1);
485 __ bne(a0, a3, &ua_smallCopy_loop);
486 __ sb(v1, MemOperand(a0, -1)); // In delay slot.
488 __ jr(ra);
489 __ nop();
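
The lwr/lwl pairs in the unaligned path above assemble a full word from a misaligned source address in two partial loads, so the stores to the (aligned) destination can remain plain sw. A hedged C++ equivalent of what one such pair achieves (the helper name is illustrative):

    #include <cstdint>
    #include <cstring>

    // Read 32 bits from an address that need not be 4-byte aligned,
    // without taking an alignment trap.
    static inline uint32_t LoadWordUnaligned(const uint8_t* p) {
      uint32_t word;
      std::memcpy(&word, p, sizeof word);
      return word;
    }
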
513 __ MovFromFloatParameter(f12);
514 __ sqrt_d(f0, f12);
515 __ MovToFloatResult(f0);
516 __ Ret();
528 #undef __
551 #define __ ACCESS_MASM(masm)
566 __ JumpIfJSArrayHasAllocationMemento(
571 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
572 __ RecordWriteField(receiver,
609 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
614 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
615 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
616 __ Branch(&only_change_map, eq, at, Operand(elements));
618 __ push(ra);
619 __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
624 __ SmiScale(scratch, length, kDoubleSizeLog2);
625 __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
626 __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
630 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
631 __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
633 __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
635 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
636 __ RecordWriteField(receiver,
645 __ Daddu(scratch1, array, Operand(kHeapObjectTag));
646 __ sd(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
647 __ RecordWriteField(receiver,
658 __ Daddu(scratch1, elements,
660 __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
661 __ SmiScale(array_end, length, kDoubleSizeLog2);
662 __ Daddu(array_end, array_end, scratch3);
667 __ li(hole_lower, Operand(kHoleNanLower32));
673 __ Branch(USE_DELAY_SLOT, &entry);
674 __ li(hole_upper, Operand(kHoleNanUpper32)); // In delay slot.
676 __ bind(&only_change_map);
677 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
678 __ RecordWriteField(receiver,
686 __ Branch(&done);
689 __ bind(&gc_required);
690 __ ld(ra, MemOperand(sp, 0));
691 __ Branch(USE_DELAY_SLOT, fail);
692 __ daddiu(sp, sp, kPointerSize); // In delay slot.
695 __ bind(&loop);
696 __ ld(scratch2, MemOperand(scratch1));
697 __ Daddu(scratch1, scratch1, kIntSize);
699 __ JumpIfNotSmi(scratch2, &convert_hole);
700 __ SmiUntag(scratch2);
703 __ mtc1(scratch2, f0);
704 __ cvt_d_w(f0, f0);
705 __ sdc1(f0, MemOperand(scratch3));
706 __ Branch(USE_DELAY_SLOT, &entry);
707 __ daddiu(scratch3, scratch3, kDoubleSize); // In delay slot.
710 __ bind(&convert_hole);
713 __ Or(scratch2, scratch2, Operand(1));
714 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
715 __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
718 __ sw(hole_lower, MemOperand(scratch3));
720 __ sw(hole_upper, MemOperand(scratch3, kIntSize));
721 __ Daddu(scratch3, scratch3, kDoubleSize);
723 __ bind(&entry);
724 __ Branch(&loop, lt, scratch3, Operand(array_end));
726 __ bind(&done);
727 __ pop(ra);
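
The smi-to-double loop above converts each tagged slot of the old backing store: Smis are untagged and widened to doubles (mtc1, cvt_d_w, sdc1), and the hole is rewritten as the dedicated hole NaN built from kHoleNanLower32/kHoleNanUpper32. A simplified C++ sketch of that loop, assuming the 64-bit tagging scheme where the Smi payload sits in the upper 32 bits; the hole sentinel and NaN bits are passed in rather than hard-coded:

    #include <cstdint>
    #include <cstring>

    void SmiToDoubleSketch(const int64_t* src, uint64_t* dst, int length,
                           int64_t the_hole, uint64_t hole_nan_bits) {
      for (int i = 0; i < length; ++i) {
        if (src[i] != the_hole) {
          // SmiUntag on MIPS64 is an arithmetic shift right by 32.
          double d = static_cast<double>(static_cast<int32_t>(src[i] >> 32));
          std::memcpy(&dst[i], &d, sizeof d);
        } else {
          dst[i] = hole_nan_bits;  // the stub stores this as two 32-bit sw
        }
      }
    }
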
750 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
755 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
756 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
757 __ Branch(&only_change_map, eq, at, Operand(elements));
759 __ MultiPush(
762 __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
771 __ SmiScale(array_size, length, kPointerSizeLog2);
772 __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
773 __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
777 __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
778 __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
779 __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
786 __ Daddu(src_elements, src_elements,
788 __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
789 __ Daddu(array, array, Operand(kHeapObjectTag));
790 __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
791 __ Daddu(dst_end, dst_elements, dst_end);
792 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
800 __ Branch(&entry);
803 __ bind(&gc_required);
804 __ MultiPop(
807 __ Branch(fail);
809 __ bind(&loop);
811 __ lw(upper_bits, MemOperand(src_elements));
812 __ Daddu(src_elements, src_elements, kDoubleSize);
815 __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
821 __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
826 __ lw(scratch2, MemOperand(heap_number, -12));
827 __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
828 __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
829 __ mov(scratch2, dst_elements);
830 __ sd(heap_number, MemOperand(dst_elements));
831 __ Daddu(dst_elements, dst_elements, kPointerSize);
832 __ RecordWrite(array,
839 __ Branch(&entry);
842 __ bind(&convert_hole);
843 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
844 __ sd(scratch2, MemOperand(dst_elements));
845 __ Daddu(dst_elements, dst_elements, kPointerSize);
847 __ bind(&entry);
848 __ Branch(&loop, lt, dst_elements, Operand(dst_end));
850 __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
852 __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
853 __ RecordWriteField(receiver,
861 __ pop(ra);
863 __ bind(&only_change_map);
865 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
866 __ RecordWriteField(receiver,
883 __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
884 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
888 __ And(at, result, Operand(kIsIndirectStringMask));
889 __ Branch(&check_sequential, eq, at, Operand(zero_reg));
893 __ And(at, result, Operand(kSlicedNotConsMask));
894 __ Branch(&cons_string, eq, at, Operand(zero_reg));
898 __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
899 __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
900 __ dsra32(at, result, 0);
901 __ Daddu(index, index, at);
902 __ jmp(&indirect_string_loaded);
909 __ bind(&cons_string);
910 __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
911 __ LoadRoot(at, Heap::kempty_stringRootIndex);
912 __ Branch(call_runtime, ne, result, Operand(at));
914 __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
916 __ bind(&indirect_string_loaded);
917 __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
918 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
924 __ bind(&check_sequential);
926 __ And(at, result, Operand(kStringRepresentationMask));
927 __ Branch(&external_string, ne, at, Operand(zero_reg));
931 __ Daddu(string,
934 __ jmp(&check_encoding);
937 __ bind(&external_string);
941 __ And(at, result, Operand(kIsIndirectStringMask));
942 __ Assert(eq, kExternalStringExpectedButNotFound,
947 __ And(at, result, Operand(kShortExternalStringMask));
948 __ Branch(call_runtime, ne, at, Operand(zero_reg));
949 __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
952 __ bind(&check_encoding);
954 __ And(at, result, Operand(kStringEncodingMask));
955 __ Branch(&one_byte, ne, at, Operand(zero_reg));
957 __ dsll(at, index, 1);
958 __ Daddu(at, string, at);
959 __ lhu(result, MemOperand(at));
960 __ jmp(&done);
961 __ bind(&one_byte);
963 __ Daddu(at, string, index);
964 __ lbu(result, MemOperand(at));
965 __ bind(&done);
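
The block above is the usual walk over V8's string representations: resolve sliced/cons indirection, then index either the sequential payload or the external resource data, with a one-byte or two-byte load depending on the encoding. A rough C++ outline of that decision tree; the struct and field names here are illustrative, not V8's object layout:

    #include <cstdint>

    struct StringRepSketch {
      bool is_sliced, is_cons, is_two_byte;
      StringRepSketch* target;   // sliced: parent string, cons: first part
      int64_t slice_offset;      // sliced strings only
      const void* payload;       // sequential chars or external resource data
    };

    uint16_t LoadCharSketch(StringRepSketch* s, int64_t index) {
      if (s->is_sliced) {        // add the slice offset, then use the parent
        index += s->slice_offset;
        s = s->target;
      } else if (s->is_cons) {   // only taken when the second part is empty;
        s = s->target;           // otherwise the stub defers to the runtime
      }
      return s->is_two_byte
                 ? reinterpret_cast<const uint16_t*>(s->payload)[index]  // lhu
                 : reinterpret_cast<const uint8_t*>(s->payload)[index];  // lbu
    }
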
995 __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
997 __ ldc1(double_scratch1, ExpConstant(0, temp3));
998 __ BranchF(&zero, NULL, ge, double_scratch1, input);
1000 __ ldc1(double_scratch2, ExpConstant(1, temp3));
1001 __ BranchF(&infinity, NULL, ge, input, double_scratch2);
1003 __ ldc1(double_scratch1, ExpConstant(3, temp3));
1004 __ ldc1(result, ExpConstant(4, temp3));
1005 __ mul_d(double_scratch1, double_scratch1, input);
1006 __ add_d(double_scratch1, double_scratch1, result);
1007 __ FmoveLow(temp2, double_scratch1);
1008 __ sub_d(double_scratch1, double_scratch1, result);
1009 __ ldc1(result, ExpConstant(6, temp3));
1010 __ ldc1(double_scratch2, ExpConstant(5, temp3));
1011 __ mul_d(double_scratch1, double_scratch1, double_scratch2);
1012 __ sub_d(double_scratch1, double_scratch1, input);
1013 __ sub_d(result, result, double_scratch1);
1014 __ mul_d(double_scratch2, double_scratch1, double_scratch1);
1015 __ mul_d(result, result, double_scratch2);
1016 __ ldc1(double_scratch2, ExpConstant(7, temp3));
1017 __ mul_d(result, result, double_scratch2);
1018 __ sub_d(result, result, double_scratch1);
1022 __ Move(double_scratch2, 1);
1023 __ add_d(result, result, double_scratch2);
1024 __ dsrl(temp1, temp2, 11);
1025 __ Ext(temp2, temp2, 0, 11);
1026 __ Daddu(temp1, temp1, Operand(0x3ff));
1029 __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
1030 __ dsll(at, temp2, 3);
1031 __ Daddu(temp3, temp3, Operand(at));
1032 __ lwu(temp2, MemOperand(temp3, 0));
1033 __ lwu(temp3, MemOperand(temp3, kIntSize));
1036 __ dsll(at, temp1, 20);
1037 __ Or(temp1, temp3, at);
1038 __ Move(double_scratch1, temp2, temp1);
1040 __ dsll(at, temp1, 20);
1041 __ Or(temp1, temp2, at);
1042 __ Move(double_scratch1, temp3, temp1);
1044 __ mul_d(result, result, double_scratch1);
1045 __ BranchShort(&done);
1047 __ bind(&zero);
1048 __ Move(result, kDoubleRegZero);
1049 __ BranchShort(&done);
1051 __ bind(&infinity);
1052 __ ldc1(result, ExpConstant(2, temp3));
1054 __ bind(&done);
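
Structurally, the exp block above is a table-driven approximation. Once the input passes the &zero / &infinity range checks against ExpConstant(0) and ExpConstant(1), it computes, roughly,

    exp(x) ≈ 2^m · T[j] · p(r)

where t = C3·x + C4 packs m (its low word shifted right by 11) and j (the low 11 bits, via Ext) into its low word, T is the 2^11-entry table behind math_exp_log_table whose entry has its exponent field patched with m + 0x3ff before the final mul_d, r = (t − C4)·C5 − x is the residual of the range reduction, and p is the short polynomial built from ExpConstant(6) and ExpConstant(7). This is a rough reading of the lines above, not a specification of the constants' exact values.
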
1138 #undef __