/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Nterp entry point and support functions.
 */
#include "nterp.h"

#include "base/quasi_atomic.h"
#include "debugger.h"
#include "dex/dex_instruction_utils.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"

namespace art {
namespace interpreter {

bool IsNterpSupported() {
  return !kPoisonHeapReferences && kUseReadBarrier;
}

bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
  // Nterp shares the same restrictions as Mterp.
  // If the runtime is interpreter-only, we currently don't use nterp, as some
  // parts of the runtime (like instrumentation) assume an interpreter-only
  // runtime always runs the switch-based interpreter.
  return IsNterpSupported() && CanUseMterp() && !instr->InterpretOnly();
}

const void* GetNterpEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}

/*
 * Verify some constants used by the nterp interpreter.
 */
void CheckNterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit.  This won't tell
   * us which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = kMterpHandlerSize;
  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
      LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
                 << " (did an instruction handler exceed " << width << " bytes?)";
  }
  static_assert(IsPowerOfTwo(kNterpHotnessMask + 1), "Hotness mask must be a (power of 2) - 1");
  static_assert(IsPowerOfTwo(kTieredHotnessMask + 1),
                "Tiered hotness mask must be a (power of 2) - 1");
}

inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  // The hotness we will add to a method when we perform a
  // field/method/class/string lookup.
  constexpr uint16_t kNterpHotnessLookup = 0xf;

  // Convert to uint32_t to handle uint16_t overflow.
  uint32_t counter = method->GetCounter();
  uint32_t new_counter = counter + kNterpHotnessLookup;
  if (new_counter > kNterpHotnessMask) {
    // Let the nterp code actually trigger the compilation: we want to make sure
    // there's at least a second execution of the method or a back edge, to avoid
    // compiling straight-line initialization methods.
    method->SetCounter(kNterpHotnessMask);
  } else {
    method->SetCounter(new_counter);
  }
}

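// Write-through helper for the per-thread interpreter cache, keyed on the dex
// PC pointer. The cached value is an opaque payload whose encoding is decided
// by each caller below (ArtMethod*, ArtField*, vtable index, field offset,
// class or string pointer, ...).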
template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T value) {
  DCHECK(kUseReadBarrier) << "Nterp only works with read barriers";
  // For simplicity, only update the cache if weak ref accesses are enabled. If
  // they are disabled, this means the GC is processing the cache, and is
  // reading it concurrently.
  if (self->GetWeakRefAccessEnabled()) {
    self->GetInterpreterCache()->Set(dex_pc_ptr, value);
  }
}

template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T* value) {
  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}

#ifdef __arm__

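// Copies the floating-point arguments of the current method out of the ARM32
// FP argument registers (captured in `fprs`; indices below 16 correspond to
// s0-s15 under the hard-float calling convention) and the outgoing stack area
// into the core `registers` array, walking the shorty the same way the managed
// ABI assigned them. Doubles consume an aligned register pair.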
extern "C" void NterpStoreArm32Fprs(const char* shorty,
                                    uint32_t* registers,
                                    uint32_t* stack_args,
                                    const uint32_t* fprs) {
  // Note: `shorty` already has the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t arg_index = 0;
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // A double should not overlap with a float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          registers[arg_index] = fprs[fpr_double_index++];
          registers[arg_index + 1] = fprs[fpr_double_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
          registers[arg_index + 1] = stack_args[arg_index + 1];
        }
        arg_index += 2;
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          registers[arg_index] = fprs[fpr_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
        }
        arg_index++;
        break;
      }
      case 'J': {
        arg_index += 2;
        break;
      }
      default: {
        arg_index++;
        break;
      }
    }
  }
}

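// The inverse of NterpStoreArm32Fprs: when setting up an outgoing call, copies
// floating-point arguments from the dex `registers` into the ARM32 FP argument
// registers, spilling to `stack_args` once the 16 single-precision slots are
// exhausted.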
extern "C" void NterpSetupArm32Fprs(const char* shorty,
                                    uint32_t dex_register,
                                    uint32_t stack_index,
                                    uint32_t* fprs,
                                    uint32_t* registers,
                                    uint32_t* stack_args) {
  // Note: `shorty` already has the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // A double should not overlap with a float.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          fprs[fpr_double_index++] = registers[dex_register++];
          fprs[fpr_double_index++] = registers[dex_register++];
          stack_index += 2;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          fprs[fpr_index++] = registers[dex_register++];
          stack_index++;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'J': {
        stack_index += 2;
        dex_register += 2;
        break;
      }
      default: {
        stack_index++;
        dex_register++;
        break;
      }
    }
  }
}

#endif

extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetCodeItem();
}

extern "C" const char* NterpGetShorty(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
}

extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return caller->GetDexFile()->GetMethodShorty(method_index);
}

extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
      ? inst->VRegH_45cc()
      : inst->VRegH_4rcc());
  return caller->GetDexFile()->GetShorty(proto_idx);
}

extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
      ? inst->VRegB_35c()
      : inst->VRegB_3rc());
  const DexFile* dex_file = caller->GetDexFile();
  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
  return dex_file->GetShorty(proto_idx);
}

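// Slow path for nterp invokes: resolves the method referenced by the invoke
// instruction at `dex_pc_ptr` and returns it in an encoding the assembly fast
// path can decode. For interface invokes on j.l.Object methods the result is
// (vtable index << 16) with the low bit set; default interface methods return
// the ArtMethod* with the second bit set; String.<init> resolves to the
// matching StringFactory method with the low bit set (and is deliberately not
// cached); virtual invokes return the vtable index; everything else returns
// the ArtMethod* directly.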
extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  InvokeType invoke_type = kStatic;
  uint16_t method_index = 0;
  switch (inst->Opcode()) {
    case Instruction::INVOKE_DIRECT: {
      method_index = inst->VRegB_35c();
      invoke_type = kDirect;
      break;
    }

    case Instruction::INVOKE_INTERFACE: {
      method_index = inst->VRegB_35c();
      invoke_type = kInterface;
      break;
    }

    case Instruction::INVOKE_STATIC: {
      method_index = inst->VRegB_35c();
      invoke_type = kStatic;
      break;
    }

    case Instruction::INVOKE_SUPER: {
      method_index = inst->VRegB_35c();
      invoke_type = kSuper;
      break;
    }

    case Instruction::INVOKE_VIRTUAL: {
      method_index = inst->VRegB_35c();
      invoke_type = kVirtual;
      break;
    }

    case Instruction::INVOKE_DIRECT_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kDirect;
      break;
    }

    case Instruction::INVOKE_INTERFACE_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kInterface;
      break;
    }

    case Instruction::INVOKE_STATIC_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kStatic;
      break;
    }

    case Instruction::INVOKE_SUPER_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kSuper;
      break;
    }

    case Instruction::INVOKE_VIRTUAL_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kVirtual;
      break;
    }

    default:
      LOG(FATAL) << "Unknown instruction " << inst->Opcode();
  }

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = caller->SkipAccessChecks()
      ? class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
            self, method_index, caller, invoke_type)
      : class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            self, method_index, caller, invoke_type);
  if (resolved_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }

  // ResolveMethod returns the method based on the method_id. For super invokes
  // we must use the executing class's context to find the right method.
  if (invoke_type == kSuper) {
    ObjPtr<mirror::Class> executing_class = caller->GetDeclaringClass();
    ObjPtr<mirror::Class> referenced_class = class_linker->LookupResolvedType(
        executing_class->GetDexFile().GetMethodId(method_index).class_idx_,
        executing_class->GetDexCache(),
        executing_class->GetClassLoader());
    DCHECK(referenced_class != nullptr);  // We have already resolved a method from this class.
    if (!referenced_class->IsAssignableFrom(executing_class)) {
      // We cannot determine the target method.
      ThrowNoSuchMethodError(invoke_type,
                             resolved_method->GetDeclaringClass(),
                             resolved_method->GetName(),
                             resolved_method->GetSignature());
      return 0;
    }
    if (referenced_class->IsInterface()) {
      resolved_method = referenced_class->FindVirtualMethodForInterfaceSuper(
          resolved_method, class_linker->GetImagePointerSize());
    } else {
      uint16_t vtable_index = resolved_method->GetMethodIndex();
      ObjPtr<mirror::Class> super_class = executing_class->GetSuperClass();
      if (super_class == nullptr ||
          !super_class->HasVTable() ||
          vtable_index >= static_cast<uint32_t>(super_class->GetVTableLength())) {
        // Behavior to agree with that of the verifier.
        ThrowNoSuchMethodError(invoke_type,
                               resolved_method->GetDeclaringClass(),
                               resolved_method->GetName(),
                               resolved_method->GetSignature());
        return 0;
      } else {
        resolved_method = super_class->GetVTableEntry(
            vtable_index, class_linker->GetImagePointerSize());
      }
    }
  }

  if (invoke_type == kInterface) {
    size_t result = 0u;
    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
      // Set the low bit to notify the interpreter it should do a vtable call.
      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
      result = (resolved_method->GetMethodIndex() << 16) | 1U;
    } else {
      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
      DCHECK(!resolved_method->IsCopied());
      if (!resolved_method->IsAbstract()) {
        // Set the second bit to notify the interpreter this is a default
        // method.
        result = reinterpret_cast<size_t>(resolved_method) | 2U;
      } else {
        result = reinterpret_cast<size_t>(resolved_method);
      }
    }
    UpdateCache(self, dex_pc_ptr, result);
    return result;
  } else if (resolved_method->GetDeclaringClass()->IsStringClass()
             && !resolved_method->IsStatic()
             && resolved_method->IsConstructor()) {
    CHECK_NE(invoke_type, kSuper);
    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
    // OR the result with 1 to notify nterp this is a string init method. We
    // also don't cache the result, as we don't want nterp's fast path to
    // always check for it, and we expect a lot more regular calls than string
    // init calls.
    return reinterpret_cast<size_t>(resolved_method) | 1;
  } else if (invoke_type == kVirtual) {
    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
    return resolved_method->GetMethodIndex();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_method);
    return reinterpret_cast<size_t>(resolved_method);
  }
}

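// Resolves the field referenced by `field_index`, replicating for unverified
// callers the checks the verifier would otherwise guarantee: the
// static/instance kind must match the instruction, the caller must have
// access, and a put on a final field is only allowed from the declaring
// class. When `resolve_field_type` is non-zero, the field's type is eagerly
// resolved as well.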
static ArtField* ResolveFieldWithAccessChecks(Thread* self,
                                              ClassLinker* class_linker,
                                              uint16_t field_index,
                                              ArtMethod* caller,
                                              bool is_static,
                                              bool is_put,
                                              size_t resolve_field_type)  // Resolve if not zero.
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (caller->SkipAccessChecks()) {
    return class_linker->ResolveField(field_index, caller, is_static);
  }

  caller = caller->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  StackHandleScope<2> hs(self);
  Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(caller->GetDexCache()));
  Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(caller->GetClassLoader()));

  ArtField* resolved_field = class_linker->ResolveFieldJLS(field_index,
                                                           h_dex_cache,
                                                           h_class_loader);
  if (resolved_field == nullptr) {
    return nullptr;
  }

  ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
  if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
    ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, caller);
    return nullptr;
  }
  ObjPtr<mirror::Class> referring_class = caller->GetDeclaringClass();
  if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
                                                          resolved_field,
                                                          caller->GetDexCache(),
                                                          field_index))) {
    return nullptr;
  }
  if (UNLIKELY(is_put && resolved_field->IsFinal() && (fields_class != referring_class))) {
    ThrowIllegalAccessErrorFinalField(caller, resolved_field);
    return nullptr;
  }
  if (resolve_field_type != 0u && resolved_field->ResolveType() == nullptr) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  return resolved_field;
}

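// Slow path for sget/sput: resolves the static field, ensures its declaring
// class is at least initializing, and returns the ArtField* with the low bit
// set when the field is volatile (in which case the result is not cached, so
// the fast path never has to test for it).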
extern "C" size_t NterpGetStaticField(Thread* self,
                                      ArtMethod* caller,
                                      uint16_t* dex_pc_ptr,
                                      size_t resolve_field_type)  // Resolve if not zero.
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegB_21c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /* is_static= */ true,
      /* is_put= */ IsInstructionSPut(inst->Opcode()),
      resolve_field_type);

  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
    if (UNLIKELY(!class_linker->EnsureInitialized(
                      self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    DCHECK(h_class->IsInitializing());
  }
  if (resolved_field->IsVolatile()) {
    // OR the result with 1 to notify nterp this is a volatile field. We
    // also don't cache the result, as we don't want nterp's fast path to
    // always check for it.
    return reinterpret_cast<size_t>(resolved_field) | 1;
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_field);
    return reinterpret_cast<size_t>(resolved_field);
  }
}

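// Slow path for iget/iput: resolves the instance field and returns its memory
// offset. Volatile fields return the negated offset as a marker and are not
// cached; non-volatile offsets are cached for the fast path.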
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                uint16_t* dex_pc_ptr,
                                                size_t resolve_field_type)  // Resolve if not zero.
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegC_22c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /* is_static= */ false,
      /* is_put= */ IsInstructionIPut(inst->Opcode()),
      resolve_field_type);
  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (resolved_field->IsVolatile()) {
    // Don't cache for a volatile field, and return a negative offset as a
    // marker of volatility.
    return -resolved_field->GetOffset().Uint32Value();
  }
  UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
  return resolved_field->GetOffset().Uint32Value();
}

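// Slow path for new-instance, check-cast, instance-of, const-class and
// new-array: resolves (and access-checks) the referenced class. For
// new-instance the object is allocated here as well; for the other opcodes
// the class itself is returned, and cached when it is safe to do so.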
extern "C" mirror::Object* NterpGetClassOrAllocateObject(Thread* self,
                                                         ArtMethod* caller,
                                                         uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::TypeIndex index;
  switch (inst->Opcode()) {
    case Instruction::NEW_INSTANCE:
    case Instruction::CHECK_CAST:
    case Instruction::CONST_CLASS:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::INSTANCE_OF:
    case Instruction::NEW_ARRAY:
      index = dex::TypeIndex(inst->VRegC_22c());
      break;
    default:
      LOG(FATAL) << "Unreachable";
  }
  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (c == nullptr) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  if (inst->Opcode() == Instruction::NEW_INSTANCE) {
    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    if (UNLIKELY(c->IsStringClass())) {
      // We don't cache the class for strings as we need to special case their
      // allocation.
      return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
    } else {
      if (!c->IsFinalizable() && c->IsInstantiable()) {
        // Cache non-finalizable classes for next calls.
        UpdateCache(self, dex_pc_ptr, c.Ptr());
      }
      return AllocObjectFromCode(c, self, allocator_type).Ptr();
    }
  } else {
    // For all other cases, cache the class.
    UpdateCache(self, dex_pc_ptr, c.Ptr());
  }
  return c.Ptr();
}

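// Slow path for const-string, const-method-handle and const-method-type:
// resolves and returns the referenced object. Only strings are cached, since
// they are the only case we expect to be performance sensitive.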
extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  switch (inst->Opcode()) {
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO: {
      UpdateHotness(caller);
      dex::StringIndex string_index(
          (inst->Opcode() == Instruction::CONST_STRING)
              ? inst->VRegB_21c()
              : inst->VRegB_31c());
      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
      if (str == nullptr) {
        DCHECK(self->IsExceptionPending());
        return nullptr;
      }
      UpdateCache(self, dex_pc_ptr, str.Ptr());
      return str.Ptr();
    }
    case Instruction::CONST_METHOD_HANDLE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance-sensitive entry.
      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
    }
    case Instruction::CONST_METHOD_TYPE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance-sensitive entry.
      return class_linker->ResolveMethodType(
          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
  return nullptr;
}

extern "C" void NterpUnimplemented() {
  LOG(FATAL) << "Unimplemented";
}

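// Shared implementation of filled-new-array and filled-new-array/range:
// resolves the array class, rejects primitive component types other than
// 'int', allocates the array, and copies the source registers into it.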
static mirror::Object* DoFilledNewArray(Thread* self,
                                        ArtMethod* caller,
                                        uint16_t* dex_pc_ptr,
                                        uint32_t* regs,
                                        bool is_range)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  if (kIsDebugBuild) {
    if (is_range) {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
    } else {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
    }
  }
  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
  DCHECK_GE(length, 0);
  if (!is_range) {
    // Check that FILLED_NEW_ARRAY's length does not exceed its 5-argument limit.
    DCHECK_LE(length, 5);
  }
  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  ObjPtr<mirror::Class> array_class =
      ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                             caller,
                             self,
                             /* can_run_clinit= */ true,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(array_class == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  DCHECK(array_class->IsArrayClass());
  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
      ThrowRuntimeException("Bad filled array request for type %s",
                            component_class->PrettyDescriptor().c_str());
    } else {
      self->ThrowNewExceptionF(
          "Ljava/lang/InternalError;",
          "Found type %s; filled-new-array not implemented for anything but 'int'",
          component_class->PrettyDescriptor().c_str());
    }
    return nullptr;
  }
  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
      self,
      array_class,
      length,
      array_class->GetComponentSizeShift(),
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(new_array == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  uint32_t arg[Instruction::kMaxVarArgRegs];  // Only used in filled-new-array.
  uint32_t vregC = 0;  // Only used in filled-new-array/range.
  if (is_range) {
    vregC = inst->VRegC_3rc();
  } else {
    inst->GetVarArgs(arg);
  }
  for (int32_t i = 0; i < length; ++i) {
    size_t src_reg = is_range ? vregC + i : arg[i];
    if (is_primitive_int_component) {
      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
    } else {
      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
    }
  }
  return new_array.Ptr();
}

extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
                                               ArtMethod* caller,
                                               uint32_t* registers,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
}

extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
                                                    ArtMethod* caller,
                                                    uint32_t* registers,
                                                    uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
}

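// Called by the nterp assembly when a method's hotness counter saturates. If
// `dex_pc_ptr` is non-null we are at a back edge and may be able to
// on-stack-replace into JIT-compiled code; otherwise the method is simply
// enqueued for JIT compilation. Returns the OSR data when a transition is
// ready, null otherwise.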
extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr && jit->UseJitCompilation()) {
    // Nterp passes a null dex pc on method entry, where we don't want to OSR.
    if (dex_pc_ptr != nullptr) {
      // This could be a loop back edge; check if we can OSR.
      CodeItemInstructionAccessor accessor(method->DexInstructions());
      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
      jit::OsrData* osr_data = jit->PrepareForOsr(
          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
      if (osr_data != nullptr) {
        return osr_data;
      }
    }
    jit->EnqueueCompilationFromNterp(method, Thread::Current());
  }
  return nullptr;
}

743 extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal);
NterpDoPackedSwitch(const uint16_t * switchData,int32_t testVal)744 extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
745     REQUIRES_SHARED(Locks::mutator_lock_) {
746   ScopedAssertNoThreadSuspension sants("In nterp");
747   return MterpDoPackedSwitch(switchData, testVal);
748 }
749 
750 extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal);
NterpDoSparseSwitch(const uint16_t * switchData,int32_t testVal)751 extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
752     REQUIRES_SHARED(Locks::mutator_lock_) {
753   ScopedAssertNoThreadSuspension sants("In nterp");
754   return MterpDoSparseSwitch(switchData, testVal);
755 }
756 
}  // namespace interpreter
}  // namespace art