fp_x86.cc revision 33ae5583bdd69847a7316ab38a8fa8ccd63093ef
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"

namespace art {

void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
                                 RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  X86OpCode op = kX86Nop;
  RegLocation rl_result;

  /*
   * Don't attempt to optimize register usage since these opcodes call out to
   * the handlers.
   */
  switch (opcode) {
    case Instruction::ADD_FLOAT_2ADDR:
    case Instruction::ADD_FLOAT:
      op = kX86AddssRR;
      break;
    case Instruction::SUB_FLOAT_2ADDR:
    case Instruction::SUB_FLOAT:
      op = kX86SubssRR;
      break;
    case Instruction::DIV_FLOAT_2ADDR:
    case Instruction::DIV_FLOAT:
      op = kX86DivssRR;
      break;
    case Instruction::MUL_FLOAT_2ADDR:
    case Instruction::MUL_FLOAT:
      op = kX86MulssRR;
      break;
    case Instruction::REM_FLOAT_2ADDR:
    case Instruction::REM_FLOAT:
      FlushAllRegs();   // Send everything to home location
      if (cu_->target64) {
        CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pFmodf), rl_src1, rl_src2,
                                                false);
      } else {
        CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmodf), rl_src1, rl_src2,
                                                false);
      }
      rl_result = GetReturn(kFPReg);
      StoreValue(rl_dest, rl_result);
      return;
    case Instruction::NEG_FLOAT:
      GenNegFloat(rl_dest, rl_src1);
      return;
    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }
  rl_src1 = LoadValue(rl_src1, kFPReg);
  rl_src2 = LoadValue(rl_src2, kFPReg);
  rl_result = EvalLoc(rl_dest, kFPReg, true);
  RegStorage r_dest = rl_result.reg;
  RegStorage r_src1 = rl_src1.reg;
  RegStorage r_src2 = rl_src2.reg;
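  // SSE scalar arithmetic is two-operand: the instruction computes dest = dest op src.
  // If the destination register aliases src2, stash src2 in a temp first so the copy of
  // src1 into dest below does not clobber it.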
  if (r_dest == r_src2) {
    r_src2 = AllocTempSingle();
    OpRegCopy(r_src2, r_dest);
  }
  OpRegCopy(r_dest, r_src1);
  NewLIR2(op, r_dest.GetReg(), r_src2.GetReg());
  StoreValue(rl_dest, rl_result);
}

void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(rl_dest.wide);
  DCHECK(rl_dest.fp);
  DCHECK(rl_src1.wide);
  DCHECK(rl_src1.fp);
  DCHECK(rl_src2.wide);
  DCHECK(rl_src2.fp);
  X86OpCode op = kX86Nop;
  RegLocation rl_result;

  switch (opcode) {
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::ADD_DOUBLE:
      op = kX86AddsdRR;
      break;
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE:
      op = kX86SubsdRR;
      break;
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE:
      op = kX86DivsdRR;
      break;
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE:
      op = kX86MulsdRR;
      break;
    case Instruction::REM_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE:
      FlushAllRegs();   // Send everything to home location
      if (cu_->target64) {
        CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pFmod), rl_src1, rl_src2,
                                                false);
      } else {
        CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmod), rl_src1, rl_src2,
                                                false);
      }
      rl_result = GetReturnWide(kFPReg);
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::NEG_DOUBLE:
      GenNegDouble(rl_dest, rl_src1);
      return;
    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }
  rl_src1 = LoadValueWide(rl_src1, kFPReg);
  rl_src2 = LoadValueWide(rl_src2, kFPReg);
  rl_result = EvalLoc(rl_dest, kFPReg, true);
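  // Same aliasing dance as in GenArithOpFloat: the sd arithmetic ops are also two-operand.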
  if (rl_result.reg == rl_src2.reg) {
    rl_src2.reg = AllocTempDouble();
    OpRegCopy(rl_src2.reg, rl_result.reg);
  }
  OpRegCopy(rl_result.reg, rl_src1.reg);
  NewLIR2(op, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double) {
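  // 32-bit x86 has no SSE instruction that converts a 64-bit integer, so the conversion is
  // done on the x87 unit: fild loads the long from its stack home, fstp rounds and stores
  // the float/double result back to memory.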
  // Compute offsets to the source and destination VRs on stack
  int src_v_reg_offset = SRegOffset(rl_src.s_reg_low);
  int dest_v_reg_offset = SRegOffset(rl_dest.s_reg_low);

  // Update the in-register state of source.
  rl_src = UpdateLocWide(rl_src);

  // All memory accesses below reference dalvik regs.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);

  // If the source is in physical register, then put it in its location on stack.
  if (rl_src.location == kLocPhysReg) {
    RegisterInfo* reg_info = GetRegInfo(rl_src.reg);

    if (reg_info != nullptr && reg_info->IsTemp()) {
      // Calling FlushSpecificReg because it will only write back VR if it is dirty.
      FlushSpecificReg(reg_info);
      // ResetDef to prevent NullifyRange from removing stores.
      ResetDef(rl_src.reg);
    } else {
      // It must have been register promoted if it is not a temp but is still in physical
      // register. Since we need it to be in memory to convert, we place it there now.
      StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64);
    }
  }

  // Push the source virtual register onto the x87 stack.
  LIR* fild64 = NewLIR2NoDest(kX86Fild64M, TargetReg(kSp).GetReg(),
                              src_v_reg_offset + LOWORD_OFFSET);
  AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
                          true /* is_load */, true /* is64bit */);

  // Now pop off x87 stack and store it in the destination VR's stack location.
  int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
  int displacement = is_double ? dest_v_reg_offset + LOWORD_OFFSET : dest_v_reg_offset;
  LIR* fstp = NewLIR2NoDest(opcode, TargetReg(kSp).GetReg(), displacement);
  AnnotateDalvikRegAccess(fstp, displacement >> 2, false /* is_load */, is_double);

  /*
   * The result is in a physical register if it was in a temp or was register
   * promoted. For that reason it is enough to check if it is in physical
   * register. If it is, then we must do all of the bookkeeping necessary to
   * invalidate temp (if needed) and load in promoted register (if needed).
   * If the result's location is in memory, then we do not need to do anything
   * more since the fstp has already placed the correct value in memory.
   */
  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
      UpdateLocTyped(rl_dest, kFPReg);
  if (rl_result.location == kLocPhysReg) {
    /*
     * We already know that the result is in a physical register but do not know if it is the
     * right class. So we call EvalLoc(Wide) first which will ensure that it will get moved to the
     * correct register class.
     */
    rl_result = EvalLoc(rl_dest, kFPReg, true);
    if (is_double) {
      LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);

      StoreFinalValueWide(rl_dest, rl_result);
    } else {
      Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);

      StoreFinalValue(rl_dest, rl_result);
    }
  }
}

void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src) {
  RegisterClass rcSrc = kFPReg;
  X86OpCode op = kX86Nop;
  RegLocation rl_result;
  switch (opcode) {
    case Instruction::INT_TO_FLOAT:
      rcSrc = kCoreReg;
      op = kX86Cvtsi2ssRR;
      break;
    case Instruction::DOUBLE_TO_FLOAT:
      rcSrc = kFPReg;
      op = kX86Cvtsd2ssRR;
      break;
    case Instruction::FLOAT_TO_DOUBLE:
      rcSrc = kFPReg;
      op = kX86Cvtss2sdRR;
      break;
    case Instruction::INT_TO_DOUBLE:
      rcSrc = kCoreReg;
      op = kX86Cvtsi2sdRR;
      break;
    case Instruction::FLOAT_TO_INT: {
      rl_src = LoadValue(rl_src, kFPReg);
      // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
      ClobberSReg(rl_dest.s_reg_low);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      RegStorage temp_reg = AllocTempSingle();

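      // Java semantics: NaN converts to 0 and positive overflow saturates to Integer.MAX_VALUE,
      // but cvttss2si produces the "integer indefinite" value 0x80000000 for both cases. So
      // first compare the source against (float)Integer.MAX_VALUE, which rounds up to 2^31:
      // 'above or equal' therefore means overflow (keep the preloaded MAX_VALUE) and 'parity'
      // means unordered, i.e. NaN (zero the result). Negative overflow needs no fixup, since
      // 0x80000000 is already Integer.MIN_VALUE.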
      LoadConstant(rl_result.reg, 0x7fffffff);
      NewLIR2(kX86Cvtsi2ssRR, temp_reg.GetReg(), rl_result.reg.GetReg());
      NewLIR2(kX86ComissRR, rl_src.reg.GetReg(), temp_reg.GetReg());
      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
      NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
      NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
      branch_normal->target = NewLIR0(kPseudoTargetLabel);
      StoreValue(rl_dest, rl_result);
      return;
    }
    case Instruction::DOUBLE_TO_INT: {
      rl_src = LoadValueWide(rl_src, kFPReg);
      // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
      ClobberSReg(rl_dest.s_reg_low);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      RegStorage temp_reg = AllocTempDouble();

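      // Same NaN/overflow clamping as FLOAT_TO_INT above, using the double-precision forms.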
      LoadConstant(rl_result.reg, 0x7fffffff);
      NewLIR2(kX86Cvtsi2sdRR, temp_reg.GetReg(), rl_result.reg.GetReg());
      NewLIR2(kX86ComisdRR, rl_src.reg.GetReg(), temp_reg.GetReg());
      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
      NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
      NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
      branch_normal->target = NewLIR0(kPseudoTargetLabel);
      StoreValue(rl_dest, rl_result);
      return;
    }
    case Instruction::LONG_TO_DOUBLE:
      if (Gen64Bit()) {
        rcSrc = kCoreReg;
        op = kX86Cvtsqi2sdRR;
        break;
      }
      GenLongToFP(rl_dest, rl_src, true /* is_double */);
      return;
    case Instruction::LONG_TO_FLOAT:
      if (Gen64Bit()) {
        rcSrc = kCoreReg;
        op = kX86Cvtsqi2ssRR;
        break;
      }
      GenLongToFP(rl_dest, rl_src, false /* is_double */);
      return;
    case Instruction::FLOAT_TO_LONG:
      if (Gen64Bit()) {
        rl_src = LoadValue(rl_src, kFPReg);
        // If result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
        ClobberSReg(rl_dest.s_reg_low);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        RegStorage temp_reg = AllocTempSingle();

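        // Same clamping pattern as FLOAT_TO_INT, but against Long.MAX_VALUE: the 64-bit
        // cvttss2si form produces the indefinite value 0x8000000000000000 on overflow or NaN,
        // which is already Long.MIN_VALUE for the negative-overflow case.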
        // Load Long.MAX_VALUE into rl_result.
        LoadConstantWide(rl_result.reg, 0x7fffffffffffffff);
        NewLIR2(kX86Cvtsqi2ssRR, temp_reg.GetReg(), rl_result.reg.GetReg());
        NewLIR2(kX86ComissRR, rl_src.reg.GetReg(), temp_reg.GetReg());
        LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
        LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
        NewLIR2(kX86Cvttss2sqiRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
        LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
        branch_na_n->target = NewLIR0(kPseudoTargetLabel);
        NewLIR2(kX86Xor64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
        branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
        branch_normal->target = NewLIR0(kPseudoTargetLabel);
        StoreValueWide(rl_dest, rl_result);
      } else {
        GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2l), rl_dest, rl_src);
      }
      return;
    case Instruction::DOUBLE_TO_LONG:
      if (Gen64Bit()) {
        rl_src = LoadValueWide(rl_src, kFPReg);
        // If result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
        ClobberSReg(rl_dest.s_reg_low);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        RegStorage temp_reg = AllocTempDouble();

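        // Same Long.MAX_VALUE clamping as FLOAT_TO_LONG above, with the double-precision forms.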
        // Load Long.MAX_VALUE into rl_result.
        LoadConstantWide(rl_result.reg, 0x7fffffffffffffff);
        NewLIR2(kX86Cvtsqi2sdRR, temp_reg.GetReg(), rl_result.reg.GetReg());
        NewLIR2(kX86ComisdRR, rl_src.reg.GetReg(), temp_reg.GetReg());
        LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
        LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
        NewLIR2(kX86Cvttsd2sqiRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
        LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
        branch_na_n->target = NewLIR0(kPseudoTargetLabel);
        NewLIR2(kX86Xor64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
        branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
        branch_normal->target = NewLIR0(kPseudoTargetLabel);
        StoreValueWide(rl_dest, rl_result);
      } else {
        GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2l), rl_dest, rl_src);
      }
      return;
    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }
  // At this point, target will be either float or double.
  DCHECK(rl_dest.fp);
  if (rl_src.wide) {
    rl_src = LoadValueWide(rl_src, rcSrc);
  } else {
    rl_src = LoadValue(rl_src, rcSrc);
  }
  rl_result = EvalLoc(rl_dest, kFPReg, true);
  NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
  if (rl_dest.wide) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
}

void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
                          RegLocation rl_src1, RegLocation rl_src2) {
  bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
  bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
  if (single) {
    rl_src1 = LoadValue(rl_src1, kFPReg);
    rl_src2 = LoadValue(rl_src2, kFPReg);
  } else {
    rl_src1 = LoadValueWide(rl_src1, kFPReg);
    rl_src2 = LoadValueWide(rl_src2, kFPReg);
  }
  // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
  ClobberSReg(rl_dest.s_reg_low);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
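  // Branch-light computation of the Dalvik compare result (-1/0/1): preload the default,
  // ucomis* the operands (this sets ZF/PF/CF like an unsigned compare, with PF flagging NaN),
  // materialize 'src1 > src2' via seta (or a jump/move pair when the result register is not
  // byte-addressable), then 'sbb 0' subtracts CF, yielding -1 when src1 < src2 or when the
  // compare was unordered under cmpl bias. Under cmpg bias, NaN short-circuits to the
  // preloaded 1 via the parity branch.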
  LoadConstantNoClobber(rl_result.reg, unordered_gt ? 1 : 0);
  if (single) {
    NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  } else {
    NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  }
  LIR* branch = nullptr;
  if (unordered_gt) {
    branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
  }
  // If the result reg can't be byte accessed, use a jump and move instead of a set.
  if (!IsByteRegister(rl_result.reg)) {
    LIR* branch2 = nullptr;
    if (unordered_gt) {
      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
      NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
    } else {
      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
      NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x1);
    }
    branch2->target = NewLIR0(kPseudoTargetLabel);
  } else {
    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondA /* above - unsigned > */);
  }
  NewLIR2(kX86Sbb32RI, rl_result.reg.GetReg(), 0);
  if (unordered_gt) {
    branch->target = NewLIR0(kPseudoTargetLabel);
  }
  StoreValue(rl_dest, rl_result);
}

void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                     bool is_double) {
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  LIR* branch = nullptr;
  RegLocation rl_src1;
  RegLocation rl_src2;
  if (is_double) {
    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
    rl_src1 = LoadValueWide(rl_src1, kFPReg);
    rl_src2 = LoadValueWide(rl_src2, kFPReg);
    NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  } else {
    rl_src1 = mir_graph_->GetSrc(mir, 0);
    rl_src2 = mir_graph_->GetSrc(mir, 1);
    rl_src1 = LoadValue(rl_src1, kFPReg);
    rl_src2 = LoadValue(rl_src2, kFPReg);
    NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  }
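  // ucomis* sets PF when the comparison is unordered (a NaN operand). Each case below first
  // dispatches the NaN outcome with a parity branch, then rewrites the condition to its
  // unsigned counterpart, since the ucomis* flag pattern matches an unsigned integer compare.
  // For eq/ne the NaN outcome (false/true respectively) does not depend on the cmpl/cmpg bias.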
  ConditionCode ccode = mir->meta.ccode;
  switch (ccode) {
    case kCondEq:
      branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
      branch->target = not_taken;
      break;
    case kCondNe:
      branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
      branch->target = taken;
      break;
    case kCondLt:
      if (gt_bias) {
        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
        branch->target = not_taken;
      }
      ccode = kCondUlt;
      break;
    case kCondLe:
      if (gt_bias) {
        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
        branch->target = not_taken;
      }
      ccode = kCondLs;
      break;
    case kCondGt:
      if (gt_bias) {
        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
        branch->target = taken;
      }
      ccode = kCondHi;
      break;
    case kCondGe:
      if (gt_bias) {
        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
        branch->target = taken;
      }
      ccode = kCondUge;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCondBranch(ccode, taken);
}

void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
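  // Negate by toggling the IEEE-754 sign bit as an integer operation: adding 0x80000000
  // flips bit 31 (any carry out is discarded), so no FP instruction is needed.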
  OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
  StoreValue(rl_dest, rl_result);
}

void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
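  // As in GenNegFloat, negate by flipping the sign bit with integer arithmetic.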
  if (Gen64Bit()) {
    LoadConstantWide(rl_result.reg, 0x8000000000000000);
    OpRegReg(kOpAdd, rl_result.reg, rl_src.reg);
  } else {
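    // 32-bit: flip the sign bit in the high word; OpRegCopy on a register pair only moves
    // the low registers, so it fills in the untouched low half of the result.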
    OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
    OpRegCopy(rl_result.reg, rl_src.reg);
  }
  StoreValueWide(rl_dest, rl_result);
}

bool X86Mir2Lir::GenInlinedSqrt(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTargetWide(info);  // double place for result
  rl_src = LoadValueWide(rl_src, kFPReg);
  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
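  // sqrtsd produces the correctly rounded IEEE-754 square root that Math.sqrt requires,
  // so the intrinsic reduces to a single instruction.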
  NewLIR2(kX86SqrtsdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
  StoreValueWide(rl_dest, rl_result);
  return true;
}

}  // namespace art