1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "calling_convention_arm.h"
18
19 #include <android-base/logging.h>
20
21 #include "arch/arm/jni_frame_arm.h"
22 #include "arch/instruction_set.h"
23 #include "base/macros.h"
24 #include "utils/arm/managed_register_arm.h"
25
26 namespace art {
27 namespace arm {
28
//
// JNI calling convention constants.
//

// List of parameters passed via registers for JNI.
// JNI uses soft-float, so there is only a GPR list.
static const Register kJniArgumentRegisters[] = {
    R0, R1, R2, R3
};

// Keep the list in sync with the count constant (presumably declared in
// arch/arm/jni_frame_arm.h, included above — confirm).
static_assert(kJniArgumentRegisterCount == arraysize(kJniArgumentRegisters));
40
41 //
42 // Managed calling convention constants.
43 //
44
45 // Used by hard float. (General purpose registers.)
46 static const Register kHFCoreArgumentRegisters[] = {
47 R0, R1, R2, R3
48 };
49 static constexpr size_t kHFCoreArgumentRegistersCount = arraysize(kHFCoreArgumentRegisters);
50
51 // (VFP single-precision registers.)
52 static const SRegister kHFSArgumentRegisters[] = {
53 S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15
54 };
55 static constexpr size_t kHFSArgumentRegistersCount = arraysize(kHFSArgumentRegisters);
56
57 // (VFP double-precision registers.)
58 static const DRegister kHFDArgumentRegisters[] = {
59 D0, D1, D2, D3, D4, D5, D6, D7
60 };
61 static constexpr size_t kHFDArgumentRegistersCount = arraysize(kHFDArgumentRegisters);
62
63 static_assert(kHFDArgumentRegistersCount * 2 == kHFSArgumentRegistersCount,
64 "ks d argument registers mismatch");
65
//
// Shared managed+JNI calling convention constants.
//

// Managed-ABI callee-save registers, spilled in the managed JNI stub frame.
// R4 is deliberately absent: it is used as the hidden argument register for
// @CriticalNative (see HiddenArgumentRegister()). R9 is also excluded here,
// unlike the AAPCS list below — presumably reserved by the runtime (cf. the
// `tr` mention in SavedLocalReferenceCookieRegister()); confirm.
// NOTE: LR must remain the last core register in this list;
// CalleeSaveRegisters() relies on it being the highest bit of the spill mask.
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    ArmManagedRegister::FromCoreRegister(R5),
    ArmManagedRegister::FromCoreRegister(R6),
    ArmManagedRegister::FromCoreRegister(R7),
    ArmManagedRegister::FromCoreRegister(R8),
    ArmManagedRegister::FromCoreRegister(R10),
    ArmManagedRegister::FromCoreRegister(R11),
    ArmManagedRegister::FromCoreRegister(LR),
    // Hard float registers.
    ArmManagedRegister::FromSRegister(S16),
    ArmManagedRegister::FromSRegister(S17),
    ArmManagedRegister::FromSRegister(S18),
    ArmManagedRegister::FromSRegister(S19),
    ArmManagedRegister::FromSRegister(S20),
    ArmManagedRegister::FromSRegister(S21),
    ArmManagedRegister::FromSRegister(S22),
    ArmManagedRegister::FromSRegister(S23),
    ArmManagedRegister::FromSRegister(S24),
    ArmManagedRegister::FromSRegister(S25),
    ArmManagedRegister::FromSRegister(S26),
    ArmManagedRegister::FromSRegister(S27),
    ArmManagedRegister::FromSRegister(S28),
    ArmManagedRegister::FromSRegister(S29),
    ArmManagedRegister::FromSRegister(S30),
    ArmManagedRegister::FromSRegister(S31)
};
97
98 template <size_t size>
CalculateCoreCalleeSpillMask(const ManagedRegister (& callee_saves)[size])99 static constexpr uint32_t CalculateCoreCalleeSpillMask(
100 const ManagedRegister (&callee_saves)[size]) {
101 // LR is a special callee save which is not reported by CalleeSaveRegisters().
102 uint32_t result = 0u;
103 for (auto&& r : callee_saves) {
104 if (r.AsArm().IsCoreRegister()) {
105 result |= (1u << r.AsArm().AsCoreRegister());
106 }
107 }
108 return result;
109 }
110
111 template <size_t size>
CalculateFpCalleeSpillMask(const ManagedRegister (& callee_saves)[size])112 static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
113 uint32_t result = 0u;
114 for (auto&& r : callee_saves) {
115 if (r.AsArm().IsSRegister()) {
116 result |= (1u << r.AsArm().AsSRegister());
117 }
118 }
119 return result;
120 }
121
// Spill masks for the managed callee-save set above.
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);

// Callee-save registers of the native (AAPCS) ABI: r4-r11, lr, s16-s31.
// Used only to verify that a @CriticalNative tail call cannot clobber anything
// the managed ABI expects preserved (see the static_asserts in OutFrameSize()).
static constexpr ManagedRegister kAapcsCalleeSaveRegisters[] = {
    // Core registers.
    ArmManagedRegister::FromCoreRegister(R4),
    ArmManagedRegister::FromCoreRegister(R5),
    ArmManagedRegister::FromCoreRegister(R6),
    ArmManagedRegister::FromCoreRegister(R7),
    ArmManagedRegister::FromCoreRegister(R8),
    ArmManagedRegister::FromCoreRegister(R9),  // The platform register is callee-save on Android.
    ArmManagedRegister::FromCoreRegister(R10),
    ArmManagedRegister::FromCoreRegister(R11),
    ArmManagedRegister::FromCoreRegister(LR),
    // Hard float registers.
    ArmManagedRegister::FromSRegister(S16),
    ArmManagedRegister::FromSRegister(S17),
    ArmManagedRegister::FromSRegister(S18),
    ArmManagedRegister::FromSRegister(S19),
    ArmManagedRegister::FromSRegister(S20),
    ArmManagedRegister::FromSRegister(S21),
    ArmManagedRegister::FromSRegister(S22),
    ArmManagedRegister::FromSRegister(S23),
    ArmManagedRegister::FromSRegister(S24),
    ArmManagedRegister::FromSRegister(S25),
    ArmManagedRegister::FromSRegister(S26),
    ArmManagedRegister::FromSRegister(S27),
    ArmManagedRegister::FromSRegister(S28),
    ArmManagedRegister::FromSRegister(S29),
    ArmManagedRegister::FromSRegister(S30),
    ArmManagedRegister::FromSRegister(S31)
};

// Spill masks for the AAPCS callee-save set.
static constexpr uint32_t kAapcsCoreCalleeSpillMask =
    CalculateCoreCalleeSpillMask(kAapcsCalleeSaveRegisters);
static constexpr uint32_t kAapcsFpCalleeSpillMask =
    CalculateFpCalleeSpillMask(kAapcsCalleeSaveRegisters);
159
160 // Calling convention
161
ReturnRegister()162 ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
163 switch (GetShorty()[0]) {
164 case 'V':
165 return ArmManagedRegister::NoRegister();
166 case 'D':
167 return ArmManagedRegister::FromDRegister(D0);
168 case 'F':
169 return ArmManagedRegister::FromSRegister(S0);
170 case 'J':
171 return ArmManagedRegister::FromRegisterPair(R0_R1);
172 default:
173 return ArmManagedRegister::FromCoreRegister(R0);
174 }
175 }
176
ReturnRegister()177 ManagedRegister ArmJniCallingConvention::ReturnRegister() {
178 switch (GetShorty()[0]) {
179 case 'V':
180 return ArmManagedRegister::NoRegister();
181 case 'D':
182 case 'J':
183 return ArmManagedRegister::FromRegisterPair(R0_R1);
184 default:
185 return ArmManagedRegister::FromCoreRegister(R0);
186 }
187 }
188
// Register holding an integer (32-bit or smaller) return value: always R0.
ManagedRegister ArmJniCallingConvention::IntReturnRegister() {
  return ArmManagedRegister::FromCoreRegister(R0);
}
192
193 // Managed runtime calling convention
194
// Register carrying the ArtMethod* on entry to managed code: R0.
// (This is why ResetIterator() starts GPR allocation at index 1.)
ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() {
  return ArmManagedRegister::FromCoreRegister(R0);
}
198
// Resets argument iteration to the first parameter. GPR allocation starts at
// index 1 because r0 carries the ArtMethod* (see MethodRegister()).
void ArmManagedRuntimeCallingConvention::ResetIterator(FrameOffset displacement) {
  ManagedRuntimeCallingConvention::ResetIterator(displacement);
  gpr_index_ = 1u;  // Skip r0 for ArtMethod*
  float_index_ = 0u;   // Next free VFP single-precision slot.
  double_index_ = 0u;  // Next free VFP double-precision slot.
}
205
// Advances the iterator past the current parameter, updating the hard-float
// register allocation state. float_index_ and double_index_ are kept mutually
// consistent (the static_assert above ties the two register files together by
// a factor of two): after a lone float lands in an even S slot, the adjacent
// odd S slot remains available for a later float ("back-filling"), while
// doubles always advance to the next D slot.
void ArmManagedRuntimeCallingConvention::Next() {
  if (IsCurrentParamAFloatOrDouble()) {
    // Invariant check relating the two indexes before updating them.
    if (float_index_ % 2 == 0) {
      // The register for the current float is the same as the first register for double.
      DCHECK_EQ(float_index_, double_index_ * 2u);
    } else {
      // There is a space for an extra float before space for a double.
      DCHECK_LT(float_index_, double_index_ * 2u);
    }
    if (IsCurrentParamADouble()) {
      double_index_ += 1u;
      if (float_index_ % 2 == 0) {
        // No pending odd float slot; keep float_index_ aligned with double_index_.
        float_index_ = double_index_ * 2u;
      }
    } else {
      if (float_index_ % 2 == 0) {
        float_index_ += 1u;
        double_index_ += 1u;  // Leaves space for one more float before the next double.
      } else {
        // The odd back-fill slot was just consumed; realign with double_index_.
        float_index_ = double_index_ * 2u;
      }
    }
  } else {  // Not a float/double.
    if (IsCurrentParamALong()) {
      // Note that the alignment to even register is done lazily.
      gpr_index_ = RoundUp(gpr_index_, 2u) + 2u;
    } else {
      gpr_index_ += 1u;
    }
  }
  ManagedRuntimeCallingConvention::Next();
}
238
IsCurrentParamInRegister()239 bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
240 if (IsCurrentParamAFloatOrDouble()) {
241 if (IsCurrentParamADouble()) {
242 return double_index_ < kHFDArgumentRegistersCount;
243 } else {
244 return float_index_ < kHFSArgumentRegistersCount;
245 }
246 } else {
247 if (IsCurrentParamALong()) {
248 // Round up to even register and do not split a long between the last register and the stack.
249 return RoundUp(gpr_index_, 2u) + 1u < kHFCoreArgumentRegistersCount;
250 } else {
251 return gpr_index_ < kHFCoreArgumentRegistersCount;
252 }
253 }
254 }
255
// A parameter is on the stack exactly when it is not in a register.
bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}
259
CurrentParamRegister()260 ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
261 DCHECK(IsCurrentParamInRegister());
262 if (IsCurrentParamAFloatOrDouble()) {
263 if (IsCurrentParamADouble()) {
264 return ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[double_index_]);
265 } else {
266 return ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[float_index_]);
267 }
268 } else {
269 if (IsCurrentParamALong()) {
270 // Currently the only register pair for a long parameter is r2-r3.
271 // Note that the alignment to even register is done lazily.
272 CHECK_EQ(RoundUp(gpr_index_, 2u), 2u);
273 return ArmManagedRegister::FromRegisterPair(R2_R3);
274 } else {
275 return ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index_]);
276 }
277 }
278 }
279
// Stack offset of the current parameter: the frame displacement, plus the
// ArtMethod* slot, plus one word per argument slot consumed so far.
FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() +        // displacement
                     kFramePointerSize +                 // Method*
                     (itr_slots_ * kFramePointerSize));  // offset into in args
}
285
286 // JNI calling convention
287
// Constructs the ARM JNI calling convention and precomputes `padding_`: the
// extra stack bytes needed so that every long/double argument lands on an
// 8-byte-aligned slot, as AAPCS requires.
ArmJniCallingConvention::ArmJniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArmPointerSize) {
  // AAPCS 4.1 specifies fundamental alignments for each type. All of our stack arguments are
  // usually 4-byte aligned, however longs and doubles must be 8 bytes aligned. Add padding to
  // maintain 8-byte alignment invariant.
  //
  // Compute padding to ensure longs and doubles are not split in AAPCS.
  size_t shift = 0;

  size_t cur_arg, cur_reg;
  if (LIKELY(HasExtraArgumentsForJni())) {
    // Ignore the 'this' jobject or jclass for static methods and the JNIEnv.
    // We start at the aligned register r2.
    //
    // Ignore the first 2 parameters because they are guaranteed to be aligned.
    cur_arg = NumImplicitArgs();  // skip the "this" arg.
    cur_reg = 2;  // skip {r0=JNIEnv, r1=jobject} / {r0=JNIEnv, r1=jclass} parameters (start at r2).
  } else {
    // Check every parameter.
    cur_arg = 0;
    cur_reg = 0;
  }

  // TODO: Maybe should just use IsCurrentParamALongOrDouble instead to be cleaner?
  // (this just seems like an unnecessary micro-optimization).

  // Shift across a logical register mapping that looks like:
  //
  //   | r0 | r1 | r2 | r3 | SP | SP+4| SP+8 | SP+12 | ... | SP+n | SP+n+4 |
  //
  // (where SP is some arbitrary stack pointer that our 0th stack arg would go into).
  //
  // Any time there would normally be a long/double in an odd logical register,
  // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment.
  //
  // This works for both physical register pairs {r0, r1}, {r2, r3} and for when
  // the value is on the stack.
  //
  // For example:
  // (a) long would normally go into r1, but we shift it into r2
  //
  //   | INT | (PAD) | LONG      |
  //   | r0  |  r1   |  r2  | r3 |
  //
  // (b) long would normally go into r3, but we shift it into SP
  //
  //   | INT | INT | INT | (PAD) | LONG     |
  //   | r0  |  r1 |  r2 |  r3   | SP+4 SP+8|
  //
  // where INT is any <=4 byte arg, and LONG is any 8-byte arg.
  for (; cur_arg < NumArgs(); cur_arg++) {
    if (IsParamALongOrDouble(cur_arg)) {
      if ((cur_reg & 1) != 0) {  // check that it's in a logical contiguous register pair
        shift += 4;
        cur_reg++;  // additional bump to ensure alignment
      }
      cur_reg += 2;  // bump the iterator twice for every long argument
    } else {
      cur_reg++;  // bump the iterator for every non-long argument
    }
  }

  if (cur_reg <= kJniArgumentRegisterCount) {
    // As a special case when, as a result of shifting (or not) there are no arguments on the stack,
    // we actually have 0 stack padding.
    //
    // For example with @CriticalNative and:
    // (int, long) -> shifts the long but doesn't need to pad the stack
    //
    //          shift
    //           \/
    //  | INT | (PAD) | LONG      | (EMPTY) ...
    //  | r0  |  r1   |  r2  | r3 |   SP    ...
    //                                /\
    //                          no stack padding
    padding_ = 0;
  } else {
    padding_ = shift;
  }

  // TODO: add some new JNI tests for @CriticalNative that introduced new edge cases
  // (a) Using r0,r1 pair = f(long,...)
  // (b) Shifting r1 long into r2,r3 pair = f(int, long, int, ...);
  // (c) Shifting but not introducing a stack padding = f(int, long);
}
378
CoreSpillMask() const379 uint32_t ArmJniCallingConvention::CoreSpillMask() const {
380 // Compute spill mask to agree with callee saves initialized in the constructor
381 return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
382 }
383
FpSpillMask() const384 uint32_t ArmJniCallingConvention::FpSpillMask() const {
385 return is_critical_native_ ? 0u : kFpCalleeSpillMask;
386 }
387
// Register used to hold the saved local-reference cookie across the native call.
// R5 is a callee-save register in both the managed and native ABIs, it is
// saved in the stack frame, and it has no special purpose like `tr`.
ManagedRegister ArmJniCallingConvention::SavedLocalReferenceCookieRegister() const {
  static_assert((kCoreCalleeSpillMask & (1u << R5)) != 0u);  // Managed callee save register.
  return ArmManagedRegister::FromCoreRegister(R5);
}
394
// Scratch register usable while handling the return value. R2 is an argument
// register, so it is dead after the native call and free to clobber here.
ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
  return ArmManagedRegister::FromCoreRegister(R2);
}
398
// Size of the managed frame built by the JNI stub: Method*, callee-save area,
// and (when needed) a return-value spill area, rounded up to kStackAlignment.
// Returns 0 for @CriticalNative, which builds no managed frame.
size_t ArmJniCallingConvention::FrameSize() const {
  if (UNLIKELY(is_critical_native_)) {
    CHECK(!SpillsMethod());
    CHECK(!HasLocalReferenceSegmentState());
    CHECK(!SpillsReturnValue());
    return 0u;  // There is no managed frame for @CriticalNative.
  }

  // Method*, callee save area size, local reference segment state
  DCHECK(SpillsMethod());
  const size_t method_ptr_size = static_cast<size_t>(kArmPointerSize);
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t total_size = method_ptr_size + callee_save_area_size;

  DCHECK(HasLocalReferenceSegmentState());
  // Cookie is saved in one of the spilled registers (see
  // SavedLocalReferenceCookieRegister()), so it needs no extra frame space.

  // Plus return value spill area size
  if (SpillsReturnValue()) {
    // For 64-bit return values there shall be a 4B alignment gap between
    // the method pointer and the saved return value.
    size_t padding = ReturnValueSaveLocation().SizeValue() - method_ptr_size;
    DCHECK_EQ(padding,
              (GetReturnType() == Primitive::kPrimLong || GetReturnType() == Primitive::kPrimDouble)
                  ? 4u
                  : 0u);
    total_size += padding;
    total_size += SizeOfReturnValue();
  }

  return RoundUp(total_size, kStackAlignment);
}
431
// Size of the outgoing-arguments area for the native call, including the
// AAPCS alignment padding computed in the constructor and, for non-tail-call
// @CriticalNative, a slot to spill LR. Rounded up to kAapcsStackAlignment.
size_t ArmJniCallingConvention::OutFrameSize() const {
  // Count param args, including JNIEnv* and jclass*; count 8-byte args twice.
  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs() + NumLongOrDoubleArgs();
  // Account for arguments passed through r0-r3. (No FP args, AAPCS32 is soft-float.)
  size_t stack_args = all_args - std::min(kJniArgumentRegisterCount, all_args);
  // The size of outgoing arguments.
  size_t size = stack_args * kFramePointerSize + padding_;

  // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS.
  static_assert((kCoreCalleeSpillMask & ~kAapcsCoreCalleeSpillMask) == 0u);
  static_assert((kFpCalleeSpillMask & ~kAapcsFpCalleeSpillMask) == 0u);

  // For @CriticalNative, we can make a tail call if there are no stack args and the
  // return type is not an FP type (otherwise we need to move the result to FP register).
  DCHECK(!RequiresSmallResultTypeExtension());
  if (is_critical_native_ && (size != 0u || GetShorty()[0] == 'F' || GetShorty()[0] == 'D')) {
    size += kFramePointerSize;  // We need to spill LR with the args.
  }
  size_t out_args_size = RoundUp(size, kAapcsStackAlignment);
  if (UNLIKELY(IsCriticalNative())) {
    // Must agree with the size the compiler computed for the stub.
    DCHECK_EQ(out_args_size, GetCriticalNativeStubFrameSize(GetShorty(), NumArgs() + 1u));
  }
  return out_args_size;
}
456
// Registers the JNI stub must spill. Normal JNI spills the full managed
// callee-save set; @CriticalNative spills nothing when tail-calling, or just
// LR when a frame for outgoing args is needed.
ArrayRef<const ManagedRegister> ArmJniCallingConvention::CalleeSaveRegisters() const {
  if (UNLIKELY(IsCriticalNative())) {
    if (UseTailCall()) {
      return ArrayRef<const ManagedRegister>();  // Do not spill anything.
    } else {
      // Spill LR with out args.
      static_assert((kCoreCalleeSpillMask >> LR) == 1u);  // Contains LR as the highest bit.
      constexpr size_t lr_index = POPCOUNT(kCoreCalleeSpillMask) - 1u;
      static_assert(kCalleeSaveRegisters[lr_index].Equals(
                        ArmManagedRegister::FromCoreRegister(LR)));
      return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
          /*pos*/ lr_index, /*length=*/ 1u);
    }
  } else {
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
  }
}
474
475 // JniCallingConvention ABI follows AAPCS where longs and doubles must occur
476 // in even register numbers and stack slots
Next()477 void ArmJniCallingConvention::Next() {
478 // Update the iterator by usual JNI rules.
479 JniCallingConvention::Next();
480
481 if (LIKELY(HasNext())) { // Avoid CHECK failure for IsCurrentParam
482 // Ensure slot is 8-byte aligned for longs/doubles (AAPCS).
483 if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) {
484 // itr_slots_ needs to be an even number, according to AAPCS.
485 itr_slots_++;
486 }
487 }
488 }
489
// The first kJniArgumentRegisterCount slots map to r0-r3; later slots are on the stack.
bool ArmJniCallingConvention::IsCurrentParamInRegister() {
  return itr_slots_ < kJniArgumentRegisterCount;
}
493
// A parameter is on the stack exactly when it is not in a register.
bool ArmJniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}
497
CurrentParamRegister()498 ManagedRegister ArmJniCallingConvention::CurrentParamRegister() {
499 CHECK_LT(itr_slots_, kJniArgumentRegisterCount);
500 if (IsCurrentParamALongOrDouble()) {
501 // AAPCS 5.1.1 requires 64-bit values to be in a consecutive register pair:
502 // "A double-word sized type is passed in two consecutive registers (e.g., r0 and r1, or r2 and
503 // r3). The content of the registers is as if the value had been loaded from memory
504 // representation with a single LDM instruction."
505 if (itr_slots_ == 0u) {
506 return ArmManagedRegister::FromRegisterPair(R0_R1);
507 } else if (itr_slots_ == 2u) {
508 return ArmManagedRegister::FromRegisterPair(R2_R3);
509 } else {
510 // The register can either be R0 (+R1) or R2 (+R3). Cannot be other values.
511 LOG(FATAL) << "Invalid iterator register position for a long/double " << itr_args_;
512 UNREACHABLE();
513 }
514 } else {
515 // All other types can fit into one register.
516 return ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
517 }
518 }
519
// Offset of the current stack-passed parameter: relative to displacement_,
// back up over the out-args area, then index by the slots beyond the four
// register-passed ones. The result must land inside the out-args area.
FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
  CHECK_GE(itr_slots_, kJniArgumentRegisterCount);
  size_t offset =
      displacement_.Int32Value()
          - OutFrameSize()
          + ((itr_slots_ - kJniArgumentRegisterCount) * kFramePointerSize);
  CHECK_LT(offset, OutFrameSize());
  return FrameOffset(offset);
}
529
// Register carrying the hidden argument for @CriticalNative stubs: R4.
// The DCHECKs verify R4 is neither a managed callee-save nor a JNI argument
// register, so holding the hidden argument there clobbers nothing.
ManagedRegister ArmJniCallingConvention::HiddenArgumentRegister() const {
  CHECK(IsCriticalNative());
  // R4 is neither managed callee-save, nor argument register, nor scratch register.
  // (It is native callee-save but the value coming from managed code can be clobbered.)
  // TODO: Change to static_assert; std::none_of should be constexpr since C++20.
  DCHECK(std::none_of(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
                      [](ManagedRegister callee_save) constexpr {
                        return callee_save.Equals(ArmManagedRegister::FromCoreRegister(R4));
                      }));
  DCHECK(std::none_of(kJniArgumentRegisters,
                      kJniArgumentRegisters + std::size(kJniArgumentRegisters),
                      [](Register reg) { return reg == R4; }));
  return ArmManagedRegister::FromCoreRegister(R4);
}
545
546 // Whether to use tail call (used only for @CriticalNative).
// Whether to use tail call (used only for @CriticalNative). Possible exactly
// when no out-args frame is needed (see OutFrameSize()).
bool ArmJniCallingConvention::UseTailCall() const {
  CHECK(IsCriticalNative());
  return OutFrameSize() == 0u;
}
551
552 } // namespace arm
553 } // namespace art
554