%def fbinop(instr=""):
    /*
     * Generic 32-bit floating-point operation. Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1". Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH r0, 1                         @ r0<- CCBB
    mov     r4, rINST, lsr #8           @ r4<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
    GET_VREG_FLOAT_BY_ADDR s1, r3       @ s1<- vCC
    GET_VREG_FLOAT_BY_ADDR s0, r2       @ s0<- vBB

    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
    $instr                              @ s2<- op
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    SET_VREG_FLOAT s2, r4, lr           @ vAA<- s2
    GOTO_OPCODE ip                      @ jump to next instruction
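
    /*
     * For example, op_add_float below expands this template with
     * instr="fadds s2, s0, s1"; the generated handler then computes
     * s2 = vBB + vCC in single precision and stores the result in vAA.
     */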

%def fbinop2addr(instr=""):
    /*
     * Generic 32-bit floating-point "/2addr" binary operation. Provide
     * an "instr" line that specifies an instruction that performs
     * "s2 = s0 op s1".
     *
     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r4, rINST, #8, #4           @ r4<- A
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
    VREG_INDEX_TO_ADDR r4, r4           @ r4<- &vA
    GET_VREG_FLOAT_BY_ADDR s1, r3       @ s1<- vB
    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
    GET_VREG_FLOAT_BY_ADDR s0, r4       @ s0<- vA
    $instr                              @ s2<- op
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    SET_VREG_FLOAT_BY_ADDR s2, r4       @ vA<- s2; no need to clear as it's 2addr
    GOTO_OPCODE ip                      @ jump to next instruction

%def fbinopWide(instr=""):
    /*
     * Generic 64-bit double-precision floating-point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH r0, 1                         @ r0<- CCBB
    mov     r4, rINST, lsr #8           @ r4<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
    GET_VREG_DOUBLE_BY_ADDR d1, r3      @ d1<- vCC
    GET_VREG_DOUBLE_BY_ADDR d0, r2      @ d0<- vBB
    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
    $instr                              @ d2<- op
    CLEAR_SHADOW_PAIR r4, ip, lr        @ Zero shadow regs
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    VREG_INDEX_TO_ADDR r4, r4           @ r4<- &vAA
    SET_VREG_DOUBLE_BY_ADDR d2, r4      @ vAA<- d2
    GOTO_OPCODE ip                      @ jump to next instruction

%def fbinopWide2addr(instr=""):
    /*
     * Generic 64-bit floating-point "/2addr" binary operation. Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r4, rINST, #8, #4           @ r4<- A
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
    CLEAR_SHADOW_PAIR r4, ip, r0        @ Zero out shadow regs
    GET_VREG_DOUBLE_BY_ADDR d1, r3      @ d1<- vB
    VREG_INDEX_TO_ADDR r4, r4           @ r4<- &vA
    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
    GET_VREG_DOUBLE_BY_ADDR d0, r4      @ d0<- vA
    $instr                              @ d2<- op
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    SET_VREG_DOUBLE_BY_ADDR d2, r4      @ vA<- d2
    GOTO_OPCODE ip                      @ jump to next instruction

%def funop(instr=""):
    /*
     * Generic 32-bit unary floating-point operation. Provide an "instr"
     * line that specifies an instruction that performs "s1 = op s0".
     *
     * For: int-to-float, float-to-int
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
    GET_VREG_FLOAT_BY_ADDR s0, r3       @ s0<- vB
    ubfx    r4, rINST, #8, #4           @ r4<- A
    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
    $instr                              @ s1<- op
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    SET_VREG_FLOAT s1, r4, lr           @ vA<- s1
    GOTO_OPCODE ip                      @ jump to next instruction

%def funopNarrower(instr=""):
    /*
     * Generic 64-bit to 32-bit unary floating-point operation. Provide an
     * "instr" line that specifies an instruction that performs "s0 = op d0".
     *
     * For: double-to-int, double-to-float
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
    GET_VREG_DOUBLE_BY_ADDR d0, r3      @ d0<- vB
    ubfx    r4, rINST, #8, #4           @ r4<- A
    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
    $instr                              @ s0<- op
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    SET_VREG_FLOAT s0, r4, lr           @ vA<- s0
    GOTO_OPCODE ip                      @ jump to next instruction

%def funopWider(instr=""):
    /*
     * Generic 32-bit to 64-bit unary floating-point operation. Provide an
     * "instr" line that specifies an instruction that performs "d0 = op s0".
     *
     * For: int-to-double, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
    GET_VREG_FLOAT_BY_ADDR s0, r3       @ s0<- vB
    ubfx    r4, rINST, #8, #4           @ r4<- A
    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
    $instr                              @ d0<- op
    CLEAR_SHADOW_PAIR r4, ip, lr        @ Zero shadow regs
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    VREG_INDEX_TO_ADDR r4, r4           @ r4<- &vA
    SET_VREG_DOUBLE_BY_ADDR d0, r4      @ vA<- d0
    GOTO_OPCODE ip                      @ jump to next instruction

%def op_add_double():
% fbinopWide(instr="faddd d2, d0, d1")

%def op_add_double_2addr():
% fbinopWide2addr(instr="faddd d2, d0, d1")

%def op_add_float():
% fbinop(instr="fadds s2, s0, s1")

%def op_add_float_2addr():
% fbinop2addr(instr="fadds s2, s0, s1")

%def op_cmpg_double():
    /*
     * Compare two floating-point values. Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH r0, 1                         @ r0<- CCBB
    mov     r4, rINST, lsr #8           @ r4<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
    GET_VREG_DOUBLE_BY_ADDR d0, r2      @ d0<- vBB
    GET_VREG_DOUBLE_BY_ADDR d1, r3      @ d1<- vCC
    vcmpe.f64 d0, d1                    @ compare (vBB, vCC)
    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    fmstat                              @ export status flags
    it      hi
    movhi   r0, #1                      @ (greater than, or unordered) r0<- 1
    it      eq
    moveq   r0, #0                      @ (equal) r0<- 0
    SET_VREG r0, r4                     @ vAA<- r0
    GOTO_OPCODE ip                      @ jump to next instruction
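
    /*
     * Note: after fmstat, an unordered compare (either operand NaN) sets
     * C and V, so the "hi" condition (C set, Z clear) holds for "greater
     * than" as well as "unordered". A NaN operand therefore produces 1,
     * the "gt bias" required by the pseudocode above.
     */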

%def op_cmpg_float():
    /*
     * Compare two floating-point values. Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH r0, 1                         @ r0<- CCBB
    mov     r4, rINST, lsr #8           @ r4<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
    GET_VREG_FLOAT_BY_ADDR s0, r2       @ s0<- vBB
    GET_VREG_FLOAT_BY_ADDR s1, r3       @ s1<- vCC
    vcmpe.f32 s0, s1                    @ compare (vBB, vCC)
    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    fmstat                              @ export status flags
    it      hi
    movhi   r0, #1                      @ (greater than, or unordered) r0<- 1
    it      eq
    moveq   r0, #0                      @ (equal) r0<- 0
    SET_VREG r0, r4                     @ vAA<- r0
    GOTO_OPCODE ip                      @ jump to next instruction

%def op_cmpl_double():
    /*
     * Compare two floating-point values. Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH r0, 1                         @ r0<- CCBB
    mov     r4, rINST, lsr #8           @ r4<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
    GET_VREG_DOUBLE_BY_ADDR d0, r2      @ d0<- vBB
    GET_VREG_DOUBLE_BY_ADDR d1, r3      @ d1<- vCC
    vcmpe.f64 d0, d1                    @ compare (vBB, vCC)
    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    fmstat                              @ export status flags
    it      gt
    movgt   r0, #1                      @ (greater than) r0<- 1
    it      eq
    moveq   r0, #0                      @ (equal) r0<- 0
    SET_VREG r0, r4                     @ vAA<- r0
    GOTO_OPCODE ip                      @ jump to next instruction
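
    /*
     * Note: "gt" is false for an unordered compare (NaN operand), so in
     * that case the -1 default set above is kept, giving the "lt bias"
     * required by the pseudocode above.
     */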

%def op_cmpl_float():
    /*
     * Compare two floating-point values. Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH r0, 1                         @ r0<- CCBB
    mov     r4, rINST, lsr #8           @ r4<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vCC
    GET_VREG_FLOAT_BY_ADDR s0, r2       @ s0<- vBB
    GET_VREG_FLOAT_BY_ADDR s1, r3       @ s1<- vCC
    vcmpe.f32 s0, s1                    @ compare (vBB, vCC)
    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    fmstat                              @ export status flags
    it      gt
    movgt   r0, #1                      @ (greater than) r0<- 1
    it      eq
    moveq   r0, #0                      @ (equal) r0<- 0
    SET_VREG r0, r4                     @ vAA<- r0
    GOTO_OPCODE ip                      @ jump to next instruction

%def op_div_double():
% fbinopWide(instr="fdivd d2, d0, d1")

%def op_div_double_2addr():
% fbinopWide2addr(instr="fdivd d2, d0, d1")

%def op_div_float():
% fbinop(instr="fdivs s2, s0, s1")

%def op_div_float_2addr():
% fbinop2addr(instr="fdivs s2, s0, s1")

%def op_double_to_float():
% funopNarrower(instr="vcvt.f32.f64 s0, d0")

%def op_double_to_int():
% funopNarrower(instr="vcvt.s32.f64 s0, d0")

%def op_double_to_long():
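/*
 * AArch32 VFP has no instruction that converts a double directly to a
 * 64-bit integer, and the Java narrowing rules (NaN -> 0, clamping to the
 * long range) need explicit handling, which is presumably why this goes
 * through a runtime helper. op_float_to_long below uses nterp_f2l_doconv
 * in the same way.
 */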
% unopWide(instr="bl nterp_d2l_doconv")

%def op_float_to_double():
% funopWider(instr="vcvt.f64.f32 d0, s0")

%def op_float_to_int():
% funop(instr="vcvt.s32.f32 s1, s0")

%def op_float_to_long():
% unopWider(instr="bl nterp_f2l_doconv")

%def op_int_to_double():
% funopWider(instr="vcvt.f64.s32 d0, s0")

%def op_int_to_float():
% funop(instr="vcvt.f32.s32 s1, s0")

%def op_long_to_double():
    /*
     * Specialised 64-bit floating-point operation.
     *
     * Note: The result will be returned in d2.
     *
     * For: long-to-double
     */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r4, rINST, #8, #4           @ r4<- A
    CLEAR_SHADOW_PAIR r4, ip, lr        @ Zero shadow regs
    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
    VREG_INDEX_TO_ADDR r4, r4           @ r4<- &fp[A]
    GET_VREG_DOUBLE_BY_ADDR d0, r3      @ d0<- vB (s0/s1 alias the low/high words)
    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST

    vcvt.f64.s32 d1, s1                 @ d1<- (double)(vBh)
    vcvt.f64.u32 d2, s0                 @ d2<- (double)(vBl)
    vldr    d3, constval$opcode         @ d3<- 2^32 (from literal pool below)
    vmla.f64 d2, d1, d3                 @ d2<- vBh*2^32 + vBl

    GET_INST_OPCODE ip                  @ extract opcode from rINST
    SET_VREG_DOUBLE_BY_ADDR d2, r4      @ vA<- d2
    GOTO_OPCODE ip                      @ jump to next instruction

    /* literal pool helper */
constval${opcode}:
    .8byte 0x41f0000000000000           @ 2^32 (4294967296.0) as an IEEE 754 double

%def op_long_to_float():
% unopNarrower(instr="bl __aeabi_l2f")

%def op_mul_double():
% fbinopWide(instr="fmuld d2, d0, d1")

%def op_mul_double_2addr():
% fbinopWide2addr(instr="fmuld d2, d0, d1")

%def op_mul_float():
% fbinop(instr="fmuls s2, s0, s1")

%def op_mul_float_2addr():
% fbinop2addr(instr="fmuls s2, s0, s1")

%def op_neg_double():
% unopWide(instr="add r1, r1, #0x80000000")

%def op_neg_float():
% unop(instr="add r0, r0, #0x80000000")

%def op_rem_double():
/* EABI doesn't define a double remainder function, but libm does */
% binopWide(instr="bl fmod")

%def op_rem_double_2addr():
/* EABI doesn't define a double remainder function, but libm does */
% binopWide2addr(instr="bl fmod")

%def op_rem_float():
/* EABI doesn't define a float remainder function, but libm does */
% binop(instr="bl fmodf")

%def op_rem_float_2addr():
/* EABI doesn't define a float remainder function, but libm does */
% binop2addr(instr="bl fmodf")

%def op_sub_double():
% fbinopWide(instr="fsubd d2, d0, d1")

%def op_sub_double_2addr():
% fbinopWide2addr(instr="fsubd d2, d0, d1")

%def op_sub_float():
% fbinop(instr="fsubs s2, s0, s1")

%def op_sub_float_2addr():
% fbinop2addr(instr="fsubs s2, s0, s1")
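
/*
 * Note: unop, unopWide, unopNarrower, unopWider, binop, binop2addr,
 * binopWide and binopWide2addr are not defined in this file; they are the
 * generic core-register templates (presumably from the companion integer
 * template file), which is why the operations implemented as function
 * calls under the softfp ABI (the rem operations and the 64-bit integer
 * conversions) use them rather than the f* templates above.
 */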