%def header():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
  ART assembly interpreter notes:

  First validate the assembly code by implementing an ExecuteXXXImpl() style body
  (doesn't handle invoke; allows higher-level code to create the frame & shadow frame).

  Once that's working, support direct entry code & eliminate the shadow frame (and
  excess locals allocation).

  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
  base of the vreg array within the shadow frame.  Access the other fields,
  dex_pc_, method_ and number_of_vregs_, via negative offsets.  For now, we'll continue
  the shadow frame mechanism of double-storing object references - via rFP &
  number_of_vregs_.
 */

/*
x86 ABI general notes:

Caller save set:
   eax, edx, ecx, st(0)-st(7)
Callee save set:
   ebx, esi, edi, ebp
Return regs:
   32-bit in eax
   64-bit in edx:eax (low-order 32 in eax)
   fp on top of fp stack st(0)

Parameters are passed on the stack, pushed right-to-left.  On entry to the
target, the first parameter is at 4(%esp).  Traditional entry code is:

functEntry:
    push    %ebp             # save old frame pointer
    mov     %esp, %ebp       # establish new frame pointer
    sub     FrameSize, %esp  # allocate storage for spills, locals & outs

Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp).

The stack must be 16-byte aligned to support SSE in native code.

If we're not doing variable stack allocation (alloca), the frame pointer can be
eliminated and all arg references adjusted to be esp-relative.
*/
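
/*
Illustrative sketch (not part of the interpreter): with a frame pointer
established as above, argno 1 lives at (1 + 2) * 4 = 12(%ebp), so a callee
would load it with:

    movl    12(%ebp), %eax   # eax <- second argument

Mterp itself, however, runs frame-pointer-free and addresses its incoming
arguments esp-relative via the IN_ARG offsets defined below.
*/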

/*
Mterp and x86 notes:

Some key interpreter variables will be assigned to registers.

  nick     reg   purpose
  rPC      esi   interpreted program counter, used for fetching instructions
  rFP      edi   interpreted frame pointer, used for accessing locals and args
  rINSTw   bx    first 16-bit code unit of current instruction
  rINSTbl  bl    opcode portion of instruction word
  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
  rIBASE   edx   base of instruction handler table
  rREFS    ebp   base of object references in shadow frame

Notes:
   o High order 16 bits of ebx must be zero on entry to handler
   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
   o eax and ecx are scratch, rINSTw/ebx sometimes scratch

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/
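
/*
As an illustrative sketch (modeled on the move-style handlers; on entry to a
handler rINST holds the zero-extended high byte of the instruction word, see
GOTO_NEXT below), a "move vA, vB" handler decodes its operands like so:

    movzbl  rINSTbl, %eax    # eax <- BA (B in high nibble, A in low)
    andb    $$0xf, %al       # eax <- A
    shrl    $$4, rINST       # rINST <- B
    GET_VREG rINST, rINST    # rINST <- vB
    SET_VREG rINST, %eax     # vA <- vB
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
*/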
86
87/*
88 * This is a #include, not a %include, because we want the C pre-processor
89 * to expand the macros into assembler assignment statements.
90 */
91#include "asm_support.h"
92#include "interpreter/cfi_asm_support.h"
93
94#define LITERAL(value) $$(value)
95
96/*
97 * Handle mac compiler specific
98 */
99#if defined(__APPLE__)
100    #define MACRO_LITERAL(value) $$(value)
101    #define FUNCTION_TYPE(name)
102    #define OBJECT_TYPE(name)
103    #define SIZE(start,end)
104    // Mac OS' symbols have an _ prefix.
105    #define SYMBOL(name) _ ## name
106    #define ASM_HIDDEN .private_extern
107#else
108    #define MACRO_LITERAL(value) $$value
109    #define FUNCTION_TYPE(name) .type name, @function
110    #define OBJECT_TYPE(name) .type name, @object
111    #define SIZE(start,end) .size start, .-end
112    #define SYMBOL(name) name
113    #define ASM_HIDDEN .hidden
114#endif
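
/*
 * For instance, SYMBOL(ExecuteMterpImpl) expands to _ExecuteMterpImpl on
 * Apple targets (which prefix C symbols with an underscore) and to plain
 * ExecuteMterpImpl elsewhere.  FUNCTION_TYPE/OBJECT_TYPE/SIZE become no-ops
 * on Apple targets because Mach-O has no .type/.size directives.
 */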

.macro PUSH _reg
    pushl \_reg
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset \_reg, 0
.endm

.macro POP _reg
    popl \_reg
    .cfi_adjust_cfa_offset -4
    .cfi_restore \_reg
.endm

/*
 * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
 * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
 */
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
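
/*
 * Example (illustrative): with rFP pointing at the vregs, the current
 * ArtMethod* is loaded with a negative displacement off rFP:
 *
 *     movl    OFF_FP_METHOD(rFP), %eax   # eax <- shadow_frame->method_
 */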

/* The frame size must keep the stack 16-byte aligned: 28 bytes here, plus
 * 4 * 4 bytes of spills, plus 4 bytes of return address = 48 bytes.
 */
#define FRAME_SIZE     28

/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
#define IN_ARG3        (FRAME_SIZE + 16 + 16)
#define IN_ARG2        (FRAME_SIZE + 16 + 12)
#define IN_ARG1        (FRAME_SIZE + 16 +  8)
#define IN_ARG0        (FRAME_SIZE + 16 +  4)
/* Spill offsets relative to %esp */
#define LOCAL0         (FRAME_SIZE -  4)
#define LOCAL1         (FRAME_SIZE -  8)
#define LOCAL2         (FRAME_SIZE - 12)
/* Out Arg offsets, relative to %esp */
#define OUT_ARG3       ( 12)
#define OUT_ARG2       (  8)
#define OUT_ARG1       (  4)
#define OUT_ARG0       (  0)  /* <- ExecuteMterpImpl esp + 0 */
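
/*
 * Spelled out (a sketch derived from the offsets above; addresses grow
 * downwards, all offsets relative to esp after the prologue):
 *
 *     IN_ARG3 .. IN_ARG0      caller's pushed arguments
 *     return address          at FRAME_SIZE + 16
 *     saved ebp/edi/esi/ebx   16 bytes of callee-save spills
 *     LOCAL0 .. LOCAL2        scratch locals within the frame
 *     OUT_ARG3 .. OUT_ARG0    outgoing call arguments, OUT_ARG0 at esp
 */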

/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rSELF    IN_ARG0(%esp)
#define rPC      %esi
#define CFI_DEX  6  // DWARF register number of the register holding dex-pc (esi).
#define CFI_TMP  0  // DWARF register number of the first argument register (eax).
#define rFP      %edi
#define rINST    %ebx
#define rINSTw   %bx
#define rINSTbh  %bh
#define rINSTbl  %bl
#define rIBASE   %edx
#define rREFS    %ebp
#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)

#define MTERP_LOGGING 0

/*
 * "Export" the PC to the dex_pc field in the shadow frame, for the benefit of
 * future exception objects.  Must be done *before* something throws.
 *
 * It's okay to do this more than once.
 *
 * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
 * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
 * offset into the code_items_[] array.  For efficiency, we will "export" the
 * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
 * to convert to a dex pc when needed.
 */
.macro EXPORT_PC
    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
.endm

/*
 * Refresh the handler table.
 */
.macro REFRESH_IBASE
    movl    rSELF, rIBASE
    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm

/*
 * Restore the handler table base after a call.
 * rIBASE lives in edx, a caller-save register, so any call may clobber it and
 * we must restore it afterwards.  edx is also written by some operations with
 * 64-bit results (like imul), so we should restore it in those cases as well.
 *
 * TODO: Consider spilling the IBase instead of restoring it from the Thread structure.
 */
.macro RESTORE_IBASE
    movl    rSELF, rIBASE
    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm

/*
 * If rSELF is already loaded into a register, we can refresh rIBASE from it directly.
 */
.macro RESTORE_IBASE_FROM_SELF _reg
    movl    THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
.endm
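
/*
 * Illustrative pattern (a sketch, not actual interpreter code): after calling
 * out to C++, the handler table base must be re-established; when self is
 * about to be loaded anyway (e.g. for an exception check), reuse it:
 *
 *     call    SYMBOL(SomeHelper)       # hypothetical helper; may clobber edx
 *     movl    rSELF, %ecx              # self is wanted anyway
 *     RESTORE_IBASE_FROM_SELF %ecx     # cheaper than a second rSELF load
 */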

/*
 * Refresh rINST.
 * On entry to a handler, rINST does not contain the opcode number.
 * However, some utilities require the full instruction word, so this macro
 * reconstructs it by restoring the opcode number.
 */
.macro REFRESH_INST _opnum
    movb    rINSTbl, rINSTbh
    movb    MACRO_LITERAL(\_opnum), rINSTbl
.endm
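
/*
 * Worked example (from the macro's semantics): for the instruction word
 * 0x2b01, a handler is entered with rINST = 0x002b; REFRESH_INST 1 moves
 * 0x2b into rINSTbh and writes opcode 0x01 into rINSTbl, leaving
 * rINSTw = 0x2b01 again.
 */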

/*
 * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
 */
.macro FETCH_INST
    movzwl  (rPC), rINST
.endm

/*
 * Strip the opcode out of rINST, compute the handler address, and jump to it.
 */
.macro GOTO_NEXT
    movzx   rINSTbl, %eax
    movzbl  rINSTbh, rINST
    shll    MACRO_LITERAL(${handler_size_bits}), %eax
    addl    rIBASE, %eax
    jmp     *%eax
.endm
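
/*
 * Dispatch arithmetic, spelled out: each handler occupies a fixed-size slot
 * of 2^handler_size_bits bytes, so for opcode N the target address is
 * rIBASE + (N << handler_size_bits).  E.g., assuming 128-byte slots
 * (handler_size_bits = 7), opcode 0x0a would dispatch to rIBASE + 0x500.
 */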

/*
 * Advance rPC by the given instruction count (in 16-bit code units).
 */
.macro ADVANCE_PC _count
    leal    2*\_count(rPC), rPC
.endm

/*
 * Advance rPC by the given instruction count, fetch the next instruction and
 * jump to its handler.
 */
.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
    ADVANCE_PC \_count
    FETCH_INST
    GOTO_NEXT
.endm

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)

.macro GET_VREG _reg _vreg
    movl    VREG_ADDRESS(\_vreg), \_reg
.endm
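
/*
 * For example, GET_VREG %eax, %ecx expands to movl (rFP,%ecx,4), %eax:
 * vregs are 4 bytes wide and indexed off rFP, with the parallel reference
 * array indexed identically off rREFS.
 */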

/* Read wide value to xmm. */
.macro GET_WIDE_FP_VREG _reg _vreg
    movq    VREG_ADDRESS(\_vreg), \_reg
.endm

/* Write a 32-bit value and clear the corresponding reference slot. */
.macro SET_VREG _reg _vreg
    movl    \_reg, VREG_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm

/* Write wide value from xmm. xmm is clobbered. */
.macro SET_WIDE_FP_VREG _reg _vreg
    movq    \_reg, VREG_ADDRESS(\_vreg)
    pxor    \_reg, \_reg
    movq    \_reg, VREG_REF_ADDRESS(\_vreg)
.endm

/* Write an object reference into both the vreg and its reference slot. */
.macro SET_VREG_OBJECT _reg _vreg
    movl    \_reg, VREG_ADDRESS(\_vreg)
    movl    \_reg, VREG_REF_ADDRESS(\_vreg)
.endm

.macro GET_VREG_HIGH _reg _vreg
    movl    VREG_HIGH_ADDRESS(\_vreg), \_reg
.endm

.macro SET_VREG_HIGH _reg _vreg
    movl    \_reg, VREG_HIGH_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm

.macro CLEAR_REF _vreg
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm

.macro CLEAR_WIDE_REF _vreg
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm

.macro GET_VREG_XMMs _xmmreg _vreg
    movss   VREG_ADDRESS(\_vreg), \_xmmreg
.endm
.macro GET_VREG_XMMd _xmmreg _vreg
    movsd   VREG_ADDRESS(\_vreg), \_xmmreg
.endm
.macro SET_VREG_XMMs _xmmreg _vreg
    movss   \_xmmreg, VREG_ADDRESS(\_vreg)
.endm
.macro SET_VREG_XMMd _xmmreg _vreg
    movsd   \_xmmreg, VREG_ADDRESS(\_vreg)
.endm
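
/*
 * Usage sketch (modeled on the SSE binop handlers; the BB/CC operand fetch
 * into %ecx/%eax is omitted): a float add reads one operand into xmm0, adds
 * the other straight from the frame, and writes back via SET_VREG_XMMs,
 * after which the reference slot still needs clearing:
 *
 *     GET_VREG_XMMs %xmm0, %ecx            # xmm0 <- vBB
 *     addss   VREG_ADDRESS(%eax), %xmm0    # xmm0 <- vBB + vCC
 *     SET_VREG_XMMs %xmm0, rINST           # vAA <- xmm0
 *     CLEAR_REF rINST                      # vAA holds a float, not a ref
 */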

/*
 * Function support macros.
 */
.macro ENTRY name
    .text
    ASM_HIDDEN SYMBOL(\name)
    .global SYMBOL(\name)
    FUNCTION_TYPE(\name)
SYMBOL(\name):
.endm

.macro END name
    SIZE(\name,\name)
.endm

%def entry():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 *
 * On entry:
 *  0  Thread* self
 *  1  insns_ (pointer to the dex instruction array)
 *  2  ShadowFrame*
 *  3  JValue* result_register
 *
 */
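/*
 * A rough C declaration of this entry point (an assumption for illustration;
 * the authoritative prototype lives in the interpreter's C++ headers):
 *
 *     extern "C" bool ExecuteMterpImpl(Thread* self,
 *                                      const uint16_t* dex_instructions,
 *                                      ShadowFrame* shadow_frame,
 *                                      JValue* result_register);
 *
 * The boolean result (eax) distinguishes normal completion from the bail-out
 * paths below, which return 0 to request the reference interpreter.
 */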
ENTRY ExecuteMterpImpl
    .cfi_startproc
    .cfi_def_cfa esp, 4

    /* Spill callee save regs */
    PUSH    %ebp
    PUSH    %edi
    PUSH    %esi
    PUSH    %ebx

    /* Allocate frame */
    subl    $$FRAME_SIZE, %esp
    .cfi_adjust_cfa_offset FRAME_SIZE

    /* Load ShadowFrame pointer */
    movl    IN_ARG2(%esp), %edx

    /* Remember the result register */
    movl    IN_ARG3(%esp), %eax
    movl    %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)

    /* Remember the dex instruction pointer */
    movl    IN_ARG1(%esp), %ecx
    movl    %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)

    /* set up "named" registers */
    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
    leal    SHADOWFRAME_VREGS_OFFSET(%edx), rFP
    leal    (rFP, %eax, 4), rREFS
    movl    SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
    lea     (%ecx, %eax, 2), rPC
    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
    EXPORT_PC

    /* Set up for backwards branches & osr profiling */
    movl    OFF_FP_METHOD(rFP), %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rSELF, %eax
    movl    %eax, OUT_ARG2(%esp)
    call    SYMBOL(MterpSetUpHotnessCountdown)

    /* Starting ibase */
    REFRESH_IBASE

    /* start executing the instruction at rPC */
    FETCH_INST
    GOTO_NEXT
    /* NOTE: no fallthrough */
    // cfi info continues, and covers the whole mterp implementation.
    END ExecuteMterpImpl

%def dchecks_before_helper():
    // Call C++ to do debug checks and return to the handler using tail call.
    .extern MterpCheckBefore
    popl    %eax                     # Return address (the instruction handler).
    movl    rSELF, %ecx
    movl    %ecx, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rPC, OUT_ARG2(%esp)
    pushl   %eax                     # Return address for the tail call.
    jmp     SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)

%def opcode_pre():
%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
    #if !defined(NDEBUG)
    call    SYMBOL(mterp_dchecks_before_helper)
    REFRESH_IBASE
    #endif

%def fallback():
/* Transfer stub to alternate interpreter */
    jmp     MterpFallback

%def helpers():
    ENTRY MterpHelpers

%def footer():
/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */

    .text
    .align  2

/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */
common_errDivideByZero:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogDivideByZeroException)
#endif
    jmp     MterpCommonFallback

common_errArrayIndex:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogArrayIndexException)
#endif
    jmp     MterpCommonFallback

common_errNegativeArraySize:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogNegativeArraySizeException)
#endif
    jmp     MterpCommonFallback

common_errNoSuchMethod:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogNoSuchMethodException)
#endif
    jmp     MterpCommonFallback

common_errNullObject:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogNullObjectException)
#endif
    jmp     MterpCommonFallback

common_exceptionThrown:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogExceptionThrownException)
#endif
    jmp     MterpCommonFallback

MterpSuspendFallback:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    THREAD_FLAGS_OFFSET(%eax), %eax
    movl    %eax, OUT_ARG2(%esp)
    call    SYMBOL(MterpLogSuspendFallback)
#endif
    jmp     MterpCommonFallback

/*
 * If we're here, something is out of the ordinary.  If there is a pending
 * exception, handle it.  Otherwise, roll back and retry with the reference
 * interpreter.
 */
MterpPossibleException:
    movl    rSELF, %eax
    testl   $$-1, THREAD_EXCEPTION_OFFSET(%eax)
    jz      MterpFallback
    /* intentional fallthrough - handle pending exception. */

/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here, or do we need to bail out to the caller?
 */
MterpException:
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpHandleException)
    testb   %al, %al
    jz      MterpExceptionReturn
    movl    OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
    movl    OFF_FP_DEX_PC(rFP), %ecx
    lea     (%eax, %ecx, 2), rPC
    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
    /* Do we need to switch interpreters? */
    movl    rSELF, %eax
    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
    jz      MterpFallback
    /* resume execution at catch block */
    REFRESH_IBASE
    FETCH_INST
    GOTO_NEXT
    /* NOTE: no fallthrough */

/*
 * Common handling for branches with support for JIT profiling.
 * On entry:
 *    rINST          <= signed offset
 *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
 *
 * We have quite a few different cases for branch profiling, OSR detection and
 * suspend check support here.
 *
 * Taken backward branches:
 *    If profiling is active, do a hotness countdown and report if we hit zero.
 *    If in OSR check mode, see if our target is a compiled loop header entry and do OSR if so.
 *    Is there a pending suspend request?  If so, suspend.
 *
 * Taken forward branches and not-taken backward branches:
 *    If in OSR check mode, see if our target is a compiled loop header entry and do OSR if so.
 *
 * Our most common case is expected to be a taken backward branch with active JIT profiling,
 * but no full OSR check and no pending suspend request.  The next most common case is a
 * not-taken branch with no full OSR check.
 */
MterpCommonTakenBranch:
    jg      .L_forward_branch               # don't add forward branches to hotness
/*
 * We need to subtract 1 from positive values and we should not see 0 here,
 * so we may use the result of the comparison with -1.
 */
#if JIT_CHECK_OSR != -1
#  error "JIT_CHECK_OSR must be -1."
#endif
    cmpw    $$JIT_CHECK_OSR, rPROFILE
    je      .L_osr_check
    decw    rPROFILE
    je      .L_add_batch                    # counted down to zero - report
.L_resume_backward_branch:
    movl    rSELF, %eax
    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
    leal    (rPC, rINST, 2), rPC
    FETCH_INST
    jnz     .L_suspend_request_pending
    REFRESH_IBASE
    GOTO_NEXT

.L_suspend_request_pending:
    EXPORT_PC
    movl    %eax, OUT_ARG0(%esp)            # rSELF in eax
    call    SYMBOL(MterpSuspendCheck)       # (self)
    testb   %al, %al
    jnz     MterpFallback
    REFRESH_IBASE                           # might have changed during suspend
    GOTO_NEXT

.L_no_count_backwards:
    cmpw    $$JIT_CHECK_OSR, rPROFILE       # possible OSR re-entry?
    jne     .L_resume_backward_branch
.L_osr_check:
    EXPORT_PC
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rINST, OUT_ARG2(%esp)
    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
    testb   %al, %al
    jz      .L_resume_backward_branch
    jmp     MterpOnStackReplacement

.L_forward_branch:
    cmpw    $$JIT_CHECK_OSR, rPROFILE       # possible OSR re-entry?
    je      .L_check_osr_forward
.L_resume_forward_branch:
    leal    (rPC, rINST, 2), rPC
    FETCH_INST
    GOTO_NEXT

.L_check_osr_forward:
    EXPORT_PC
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rINST, OUT_ARG2(%esp)
    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
    testb   %al, %al
    REFRESH_IBASE
    jz      .L_resume_forward_branch
    jmp     MterpOnStackReplacement

.L_add_batch:
    movl    OFF_FP_METHOD(rFP), %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rSELF, %eax
    movl    %eax, OUT_ARG2(%esp)
    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
    jmp     .L_no_count_backwards

/*
 * Entered from the conditional branch handlers when the OSR check request is
 * active on the not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
 */
.L_check_not_taken_osr:
    EXPORT_PC
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    $$2, OUT_ARG2(%esp)
    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
    testb   %al, %al
    REFRESH_IBASE
    jnz     MterpOnStackReplacement
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rINST, OUT_ARG2(%esp)
    call    SYMBOL(MterpLogOSR)
#endif
    movl    $$1, %eax
    jmp     MterpDone

/*
 * Bail out to the reference interpreter.
 */
MterpFallback:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogFallback)
#endif
MterpCommonFallback:
    xorl    %eax, %eax                      # 0 signals retry with reference interpreter
    jmp     MterpDone

/*
 * On entry:
 *  uint32_t* rFP  (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
    movl    $$1, %eax
    jmp     MterpDone
MterpReturn:
    movl    OFF_FP_RESULT_REGISTER(rFP), %edx
    movl    %eax, (%edx)
    movl    %ecx, 4(%edx)
    movl    $$1, %eax
MterpDone:
/*
 * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
 * checking for OSR.  If greater than zero, we might have unreported hotness to register
 * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
 * should only reach zero immediately after a hotness decrement, and is then reset to either
 * a negative special state or the new non-zero countdown value.
 */
    cmpw    $$0, rPROFILE
    jle     MRestoreFrame                   # if <= 0, no pending counts to report

    movl    %eax, rINST                     # stash return value
    /* Report cached hotness counts */
    movl    OFF_FP_METHOD(rFP), %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rSELF, %eax
    movl    %eax, OUT_ARG2(%esp)
    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
    movl    rINST, %eax                     # restore return value

    /* pop up frame */
MRestoreFrame:
    addl    $$FRAME_SIZE, %esp
    .cfi_adjust_cfa_offset -FRAME_SIZE

    /* Restore callee save registers */
    POP     %ebx
    POP     %esi
    POP     %edi
    POP     %ebp
    ret
    .cfi_endproc
    END MterpHelpers

%def instruction_end():

    OBJECT_TYPE(artMterpAsmInstructionEnd)
    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
    .global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):

%def instruction_start():

    OBJECT_TYPE(artMterpAsmInstructionStart)
    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
    .global SYMBOL(artMterpAsmInstructionStart)
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
    .text

%def default_helper_prefix():
%  return "mterp_"

%def opcode_start():
    ENTRY mterp_${opcode}
%def opcode_end():
    END mterp_${opcode}
%def helper_start(name):
    ENTRY ${name}
%def helper_end(name):
    END ${name}