/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_INL_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_INL_H_

#include "dex_cache.h"

#include <android-base/logging.h>

#include "art_field.h"
#include "art_method.h"
#include "base/atomic_pair.h"
#include "base/casts.h"
#include "base/enums.h"
#include "class_linker.h"
#include "dex/dex_file.h"
#include "gc_root-inl.h"
#include "linear_alloc.h"
#include "mirror/call_site.h"
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "obj_ptr.h"
#include "object-inl.h"
#include "runtime.h"
#include "write_barrier-inl.h"

#include <atomic>

namespace art {
namespace mirror {

template<typename T>
static void InitializeArray(std::atomic<DexCachePair<T>>* array) {
  DexCachePair<T>::Initialize(array);
}

template<typename T>
static void InitializeArray(GcRoot<T>*) {
  // No special initialization is needed.
}

template<typename T, size_t kMaxCacheSize>
T* DexCache::AllocArray(MemberOffset obj_offset, MemberOffset num_offset, size_t num) {
  num = std::min<size_t>(num, kMaxCacheSize);
  if (num == 0) {
    return nullptr;
  }
  mirror::DexCache* dex_cache = this;
  if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
    // Several code paths use DexCache without read-barrier for performance.
    // We have to check the "to-space" object here to avoid allocating twice.
    dex_cache = reinterpret_cast<DexCache*>(ReadBarrier::Mark(dex_cache));
  }
  Thread* self = Thread::Current();
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  LinearAlloc* alloc = linker->GetOrCreateAllocatorForClassLoader(GetClassLoader());
  MutexLock mu(self, *Locks::dex_cache_lock_);  // Avoid allocation by multiple threads.
  T* array = dex_cache->GetFieldPtr64<T*>(obj_offset);
  if (array != nullptr) {
    DCHECK(alloc->Contains(array));
    return array;  // Other thread just allocated the array.
  }
  array = reinterpret_cast<T*>(alloc->AllocAlign16(self, RoundUp(num * sizeof(T), 16)));
  InitializeArray(array);  // Ensure other threads see the array initialized.
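  // Note: the ordered (volatile) stores below publish the count and the array pointer only
  // after the first element has been initialized, so a reader that observes a non-null
  // pointer also observes an initialized array.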
  dex_cache->SetField32Volatile<false, false>(num_offset, num);
  dex_cache->SetField64Volatile<false, false>(obj_offset, reinterpret_cast64<uint64_t>(array));
  return array;
}

template <typename T>
inline DexCachePair<T>::DexCachePair(ObjPtr<T> object, uint32_t index)
    : object(object), index(index) {}

template <typename T>
inline void DexCachePair<T>::Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
  DexCachePair<T> first_elem;
  first_elem.object = GcRoot<T>(nullptr);
  first_elem.index = InvalidIndexForSlot(0);
  dex_cache[0].store(first_elem, std::memory_order_relaxed);
}

template <typename T>
inline T* DexCachePair<T>::GetObjectForIndex(uint32_t idx) {
  if (idx != index) {
    return nullptr;
  }
  DCHECK(!object.IsNull());
  return object.Read();
}

template <typename T>
inline void NativeDexCachePair<T>::Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache) {
  NativeDexCachePair<T> first_elem;
  first_elem.object = nullptr;
  first_elem.index = InvalidIndexForSlot(0);
  DexCache::SetNativePair(dex_cache, 0, first_elem);
}

inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
  const uint32_t vtable_entries = Object::kVTableLength;
  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}

inline uint32_t DexCache::StringSlotIndex(dex::StringIndex string_idx) {
  DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
  const uint32_t slot_idx = string_idx.index_ % kDexCacheStringCacheSize;
  DCHECK_LT(slot_idx, NumStrings());
  return slot_idx;
}

inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
  StringDexCacheType* strings = GetStrings();
  if (UNLIKELY(strings == nullptr)) {
    return nullptr;
  }
  return strings[StringSlotIndex(string_idx)].load(
      std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);
}

inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<String> resolved) {
  DCHECK(resolved != nullptr);
  StringDexCacheType* strings = GetStrings();
  if (UNLIKELY(strings == nullptr)) {
    strings = AllocArray<StringDexCacheType, kDexCacheStringCacheSize>(
        StringsOffset(), NumStringsOffset(), GetDexFile()->NumStringIds());
  }
  strings[StringSlotIndex(string_idx)].store(
      StringDexCachePair(resolved, string_idx.index_), std::memory_order_relaxed);
  Runtime* const runtime = Runtime::Current();
  if (UNLIKELY(runtime->IsActiveTransaction())) {
    DCHECK(runtime->IsAotCompiler());
    runtime->RecordResolveString(this, string_idx);
  }
  // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
  WriteBarrier::ForEveryFieldWrite(this);
}

inline void DexCache::ClearString(dex::StringIndex string_idx) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  uint32_t slot_idx = StringSlotIndex(string_idx);
  StringDexCacheType* strings = GetStrings();
  if (UNLIKELY(strings == nullptr)) {
    return;
  }
  StringDexCacheType* slot = &strings[slot_idx];
  // This is racy but should only be called from the transactional interpreter.
  if (slot->load(std::memory_order_relaxed).index == string_idx.index_) {
    StringDexCachePair cleared(nullptr, StringDexCachePair::InvalidIndexForSlot(slot_idx));
    slot->store(cleared, std::memory_order_relaxed);
  }
}

inline uint32_t DexCache::TypeSlotIndex(dex::TypeIndex type_idx) {
  DCHECK_LT(type_idx.index_, GetDexFile()->NumTypeIds());
  const uint32_t slot_idx = type_idx.index_ % kDexCacheTypeCacheSize;
  DCHECK_LT(slot_idx, NumResolvedTypes());
  return slot_idx;
}

inline Class* DexCache::GetResolvedType(dex::TypeIndex type_idx) {
  // It is theorized that a load acquire is not required since obtaining the resolved class will
  // always have an address dependency or a lock.
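  // The reader side therefore uses a relaxed load and relies on that address dependency;
  // the release ordering lives on the writer side in SetResolvedType below (see b/32075261).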
  TypeDexCacheType* resolved_types = GetResolvedTypes();
  if (UNLIKELY(resolved_types == nullptr)) {
    return nullptr;
  }
  return resolved_types[TypeSlotIndex(type_idx)].load(
      std::memory_order_relaxed).GetObjectForIndex(type_idx.index_);
}

inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) {
  DCHECK(resolved != nullptr);
  DCHECK(resolved->IsResolved()) << resolved->GetStatus();
  TypeDexCacheType* resolved_types = GetResolvedTypes();
  if (UNLIKELY(resolved_types == nullptr)) {
    resolved_types = AllocArray<TypeDexCacheType, kDexCacheTypeCacheSize>(
        ResolvedTypesOffset(), NumResolvedTypesOffset(), GetDexFile()->NumTypeIds());
  }
  // TODO default transaction support.
  // Use a release store for SetResolvedType. This is done to prevent other threads from seeing a
  // class but not necessarily seeing the loaded members like the static fields array.
  // See b/32075261.
  resolved_types[TypeSlotIndex(type_idx)].store(
      TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release);
  // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
  WriteBarrier::ForEveryFieldWrite(this);
}

inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  TypeDexCacheType* resolved_types = GetResolvedTypes();
  if (UNLIKELY(resolved_types == nullptr)) {
    return;
  }
  uint32_t slot_idx = TypeSlotIndex(type_idx);
  TypeDexCacheType* slot = &resolved_types[slot_idx];
  // This is racy but should only be called from the single-threaded ImageWriter and tests.
  if (slot->load(std::memory_order_relaxed).index == type_idx.index_) {
    TypeDexCachePair cleared(nullptr, TypeDexCachePair::InvalidIndexForSlot(slot_idx));
    slot->store(cleared, std::memory_order_relaxed);
  }
}

inline uint32_t DexCache::MethodTypeSlotIndex(dex::ProtoIndex proto_idx) {
  DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
  DCHECK_LT(proto_idx.index_, GetDexFile()->NumProtoIds());
  const uint32_t slot_idx = proto_idx.index_ % kDexCacheMethodTypeCacheSize;
  DCHECK_LT(slot_idx, NumResolvedMethodTypes());
  return slot_idx;
}

inline MethodType* DexCache::GetResolvedMethodType(dex::ProtoIndex proto_idx) {
  MethodTypeDexCacheType* methods = GetResolvedMethodTypes();
  if (UNLIKELY(methods == nullptr)) {
    return nullptr;
  }
  return methods[MethodTypeSlotIndex(proto_idx)].load(
      std::memory_order_relaxed).GetObjectForIndex(proto_idx.index_);
}

inline void DexCache::SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved) {
  DCHECK(resolved != nullptr);
  MethodTypeDexCacheType* methods = GetResolvedMethodTypes();
  if (UNLIKELY(methods == nullptr)) {
    methods = AllocArray<MethodTypeDexCacheType, kDexCacheMethodTypeCacheSize>(
        ResolvedMethodTypesOffset(), NumResolvedMethodTypesOffset(), GetDexFile()->NumProtoIds());
  }
  methods[MethodTypeSlotIndex(proto_idx)].store(
      MethodTypeDexCachePair(resolved, proto_idx.index_), std::memory_order_relaxed);
  Runtime* const runtime = Runtime::Current();
  if (UNLIKELY(runtime->IsActiveTransaction())) {
    DCHECK(runtime->IsAotCompiler());
    runtime->RecordResolveMethodType(this, proto_idx);
  }
  // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
  WriteBarrier::ForEveryFieldWrite(this);
}

inline void DexCache::ClearMethodType(dex::ProtoIndex proto_idx) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  uint32_t slot_idx = MethodTypeSlotIndex(proto_idx);
  MethodTypeDexCacheType* slot = &GetResolvedMethodTypes()[slot_idx];
  // This is racy but should only be called from the transactional interpreter.
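  // Only clear the slot if it still holds this proto index; a concurrent resolution of a
  // different proto that maps to the same slot must not be wiped out.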
  if (slot->load(std::memory_order_relaxed).index == proto_idx.index_) {
    MethodTypeDexCachePair cleared(nullptr,
                                   MethodTypeDexCachePair::InvalidIndexForSlot(slot_idx));
    slot->store(cleared, std::memory_order_relaxed);
  }
}

inline CallSite* DexCache::GetResolvedCallSite(uint32_t call_site_idx) {
  DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
  DCHECK_LT(call_site_idx, GetDexFile()->NumCallSiteIds());
  GcRoot<CallSite>* call_sites = GetResolvedCallSites();
  if (UNLIKELY(call_sites == nullptr)) {
    return nullptr;
  }
  GcRoot<mirror::CallSite>& target = call_sites[call_site_idx];
  Atomic<GcRoot<mirror::CallSite>>& ref =
      reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
  return ref.load(std::memory_order_seq_cst).Read();
}

inline ObjPtr<CallSite> DexCache::SetResolvedCallSite(uint32_t call_site_idx,
                                                      ObjPtr<CallSite> call_site) {
  DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
  DCHECK_LT(call_site_idx, GetDexFile()->NumCallSiteIds());

  GcRoot<mirror::CallSite> null_call_site(nullptr);
  GcRoot<mirror::CallSite> candidate(call_site);
  GcRoot<CallSite>* call_sites = GetResolvedCallSites();
  if (UNLIKELY(call_sites == nullptr)) {
    call_sites = AllocArray<GcRoot<CallSite>, std::numeric_limits<size_t>::max()>(
        ResolvedCallSitesOffset(), NumResolvedCallSitesOffset(), GetDexFile()->NumCallSiteIds());
  }
  GcRoot<mirror::CallSite>& target = call_sites[call_site_idx];

  // The first assignment for a given call site wins.
  Atomic<GcRoot<mirror::CallSite>>& ref =
      reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
  if (ref.CompareAndSetStrongSequentiallyConsistent(null_call_site, candidate)) {
    // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
    WriteBarrier::ForEveryFieldWrite(this);
    return call_site;
  } else {
    return target.Read();
  }
}

inline uint32_t DexCache::FieldSlotIndex(uint32_t field_idx) {
  DCHECK_LT(field_idx, GetDexFile()->NumFieldIds());
  const uint32_t slot_idx = field_idx % kDexCacheFieldCacheSize;
  DCHECK_LT(slot_idx, NumResolvedFields());
  return slot_idx;
}

inline ArtField* DexCache::GetResolvedField(uint32_t field_idx) {
  FieldDexCacheType* fields = GetResolvedFields();
  if (UNLIKELY(fields == nullptr)) {
    return nullptr;
  }
  auto pair = GetNativePair(fields, FieldSlotIndex(field_idx));
  return pair.GetObjectForIndex(field_idx);
}

inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field) {
  DCHECK(field != nullptr);
  FieldDexCachePair pair(field, field_idx);
  FieldDexCacheType* fields = GetResolvedFields();
  if (UNLIKELY(fields == nullptr)) {
    fields = AllocArray<FieldDexCacheType, kDexCacheFieldCacheSize>(
        ResolvedFieldsOffset(), NumResolvedFieldsOffset(), GetDexFile()->NumFieldIds());
  }
  SetNativePair(fields, FieldSlotIndex(field_idx), pair);
}

inline uint32_t DexCache::MethodSlotIndex(uint32_t method_idx) {
  DCHECK_LT(method_idx, GetDexFile()->NumMethodIds());
  const uint32_t slot_idx = method_idx % kDexCacheMethodCacheSize;
  DCHECK_LT(slot_idx, NumResolvedMethods());
  return slot_idx;
}

inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx) {
  MethodDexCacheType* methods = GetResolvedMethods();
  if (UNLIKELY(methods == nullptr)) {
    return nullptr;
  }
  auto pair = GetNativePair(methods, MethodSlotIndex(method_idx));
  return pair.GetObjectForIndex(method_idx);
}

inline void DexCache::SetResolvedMethod(uint32_t method_idx, ArtMethod* method) {
  DCHECK(method != nullptr);
  MethodDexCachePair pair(method, method_idx);
  MethodDexCacheType* methods = GetResolvedMethods();
  if (UNLIKELY(methods == nullptr)) {
    methods = AllocArray<MethodDexCacheType, kDexCacheMethodCacheSize>(
        ResolvedMethodsOffset(), NumResolvedMethodsOffset(), GetDexFile()->NumMethodIds());
  }
  SetNativePair(methods, MethodSlotIndex(method_idx), pair);
}

template <typename T>
NativeDexCachePair<T> DexCache::GetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                                              size_t idx) {
  auto* array = reinterpret_cast<std::atomic<AtomicPair<uintptr_t>>*>(pair_array);
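  // The pointer and index halves are loaded as one atomic unit; the load-acquire pairs with
  // the store-release in SetNativePair so that writes to the pointed-to ArtField/ArtMethod
  // are visible before the pair is observed.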
  AtomicPair<uintptr_t> value = AtomicPairLoadAcquire(&array[idx]);
  return NativeDexCachePair<T>(reinterpret_cast<T*>(value.first), value.second);
}

template <typename T>
void DexCache::SetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                             size_t idx,
                             NativeDexCachePair<T> pair) {
  auto* array = reinterpret_cast<std::atomic<AtomicPair<uintptr_t>>*>(pair_array);
  AtomicPair<uintptr_t> v(reinterpret_cast<size_t>(pair.object), pair.index);
  AtomicPairStoreRelease(&array[idx], v);
}

template <typename T, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void VisitDexCachePairs(std::atomic<DexCachePair<T>>* pairs,
                               size_t num_pairs,
                               const Visitor& visitor)
    REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
  // Check both the data pointer and count since the array might be initialized
  // concurrently on other thread, and we might observe just one of the values.
  for (size_t i = 0; pairs != nullptr && i < num_pairs; ++i) {
    DexCachePair<T> source = pairs[i].load(std::memory_order_relaxed);
    // NOTE: We need the "template" keyword here to avoid a compilation
    // failure. GcRoot<T> is a template argument-dependent type and we need to
    // tell the compiler to treat "Read" as a template rather than a field or
    // function. Otherwise, on encountering the "<" token, the compiler would
    // treat "Read" as a field.
    T* const before = source.object.template Read<kReadBarrierOption>();
    visitor.VisitRootIfNonNull(source.object.AddressWithoutBarrier());
    if (source.object.template Read<kReadBarrierOption>() != before) {
      pairs[i].store(source, std::memory_order_relaxed);
    }
  }
}

template <bool kVisitNativeRoots,
          VerifyObjectFlags kVerifyFlags,
          ReadBarrierOption kReadBarrierOption,
          typename Visitor>
inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) {
  // Visit instance fields first.
  VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
  // Visit arrays after.
  if (kVisitNativeRoots) {
    VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
        GetStrings<kVerifyFlags>(), NumStrings(), visitor);

    VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
        GetResolvedTypes<kVerifyFlags>(), NumResolvedTypes(), visitor);

    VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
        GetResolvedMethodTypes<kVerifyFlags>(), NumResolvedMethodTypes(), visitor);

    GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites<kVerifyFlags>();
    size_t num_call_sites = NumResolvedCallSites();
    for (size_t i = 0; resolved_call_sites != nullptr && i != num_call_sites; ++i) {
      visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
    }
  }
}

template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ObjPtr<String> DexCache::GetLocation() {
  return GetFieldObject<String, kVerifyFlags, kReadBarrierOption>(
      OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_INL_H_