/* libs/pixelflinger/codeflinger/CodeCache.cpp
**
** Copyright 2006, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#define LOG_TAG "CodeCache"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cutils/ashmem.h>
#include <log/log.h>

#include "CodeCache.h"

namespace android {

// ----------------------------------------------------------------------------

#if defined(__arm__) || defined(__aarch64__)
#include <unistd.h>
#include <errno.h>
#endif

#if defined(__mips__)
#include <asm/cachectl.h>
#include <errno.h>
#endif

// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------

// A dlmalloc mspace is used to manage the code cache over an mmapped region.
#define HAVE_MMAP 0
#define HAVE_MREMAP 0
#define HAVE_MORECORE 0
#define MALLOC_ALIGNMENT 16
#define MSPACES 1
#define NO_MALLINFO 1
#define ONLY_MSPACES 1
// Custom heap error handling.
#define PROCEED_ON_ERROR 0
static void heap_error(const char* msg, const char* function, void* p);
#define CORRUPTION_ERROR_ACTION(m) \
    heap_error("HEAP MEMORY CORRUPTION", __FUNCTION__, NULL)
#define USAGE_ERROR_ACTION(m,p) \
    heap_error("ARGUMENT IS INVALID HEAP ADDRESS", __FUNCTION__, p)

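// dlmalloc is compiled directly into this translation unit with the
// configuration above: mspace-only entry points, no mmap/sbrk of its own, and
// fatal error hooks that route heap corruption through heap_error().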
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wexpansion-to-defined"
#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
#include "../../../../external/dlmalloc/malloc.c"
#pragma GCC diagnostic pop

static void heap_error(const char* msg, const char* function, void* p) {
    ALOG(LOG_FATAL, LOG_TAG, "@@@ ABORTING: CODE FLINGER: %s IN %s addr=%p",
         msg, function, p);
    /* So that we can get a memory dump around p */
    *((int **) 0xdeadbaad) = (int *) p;
}

// ----------------------------------------------------------------------------

static void* gExecutableStore = NULL;
static mspace gMspace = NULL;
const size_t kMaxCodeCacheCapacity = 1024 * 1024;

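// Lazily creates the executable store on first use: an ashmem-backed,
// read/write/execute private mapping of kMaxCodeCacheCapacity bytes, managed
// as a dlmalloc mspace so generated code can be allocated and freed piecemeal.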
static mspace getMspace()
{
    if (gExecutableStore == NULL) {
        int fd = ashmem_create_region("CodeFlinger code cache",
                                      kMaxCodeCacheCapacity);
        LOG_ALWAYS_FATAL_IF(fd < 0,
                            "Creating code cache, ashmem_create_region "
                            "failed with error '%s'", strerror(errno));
        gExecutableStore = mmap(NULL, kMaxCodeCacheCapacity,
                                PROT_READ | PROT_WRITE | PROT_EXEC,
                                MAP_PRIVATE, fd, 0);
        LOG_ALWAYS_FATAL_IF(gExecutableStore == MAP_FAILED,
                            "Creating code cache, mmap failed with error "
                            "'%s'", strerror(errno));
        close(fd);
        gMspace = create_mspace_with_base(gExecutableStore, kMaxCodeCacheCapacity,
                                          /*locked=*/ false);
        mspace_set_footprint_limit(gMspace, kMaxCodeCacheCapacity);
    }
    return gMspace;
}

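// An Assembly is one reference-counted block of generated code, allocated out
// of the shared executable store above; resize() may move it.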
Assembly::Assembly(size_t size)
    : mCount(0), mSize(0)
{
    mBase = (uint32_t*)mspace_malloc(getMspace(), size);
    LOG_ALWAYS_FATAL_IF(mBase == NULL,
                        "Failed to create Assembly of size %zd in executable "
                        "store of size %zd", size, kMaxCodeCacheCapacity);
    mSize = size;
}

Assembly::~Assembly()
{
    mspace_free(getMspace(), mBase);
}

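// Assembly provides incStrong()/decStrong() itself so it can be held in sp<>
// without deriving from RefBase: a relaxed increment and an acquire/release
// decrement that deletes the object when the last reference drops.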
void Assembly::incStrong(const void*) const
{
    mCount.fetch_add(1, std::memory_order_relaxed);
}

void Assembly::decStrong(const void*) const
{
    if (mCount.fetch_sub(1, std::memory_order_acq_rel) == 1) {
        delete this;
    }
}

ssize_t Assembly::size() const
{
    if (!mBase) return NO_MEMORY;
    return mSize;
}

uint32_t* Assembly::base() const
{
    return mBase;
}

ssize_t Assembly::resize(size_t newSize)
{
    mBase = (uint32_t*)mspace_realloc(getMspace(), mBase, newSize);
    LOG_ALWAYS_FATAL_IF(mBase == NULL,
                        "Failed to resize Assembly to %zd in code cache "
                        "of size %zd", newSize, kMaxCodeCacheCapacity);
    mSize = newSize;
    return size();
}

// ----------------------------------------------------------------------------

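// CodeCache is a small LRU cache mapping assembly keys to Assembly objects.
// A rough usage sketch (illustrative only; the real callers live elsewhere in
// pixelflinger, and generateAssembly()/needs here are hypothetical stand-ins
// for whatever produces the code and its key):
//
//     sp<Assembly> assembly = cache.lookup(AssemblyKey<needs_t>(needs));
//     if (assembly == nullptr) {
//         assembly = generateAssembly(needs);              // hypothetical
//         cache.cache(AssemblyKey<needs_t>(needs), assembly);
//     }
//     // assembly->base() now points at executable, cache-synchronized code.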
CodeCache::CodeCache(size_t size)
    : mCacheSize(size), mCacheInUse(0)
{
    pthread_mutex_init(&mLock, 0);
}

CodeCache::~CodeCache()
{
    pthread_mutex_destroy(&mLock);
}

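// Returns the cached Assembly for keyBase, or a null sp<> if there is none.
// A hit also refreshes the entry's timestamp so it counts as recently used.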
sp<Assembly> CodeCache::lookup(const AssemblyKeyBase& keyBase) const
{
    pthread_mutex_lock(&mLock);
    sp<Assembly> r;
    ssize_t index = mCacheData.indexOfKey(key_t(keyBase));
    if (index >= 0) {
        const cache_entry_t& e = mCacheData.valueAt(index);
        e.when = mWhen++;
        r = e.entry;
    }
    pthread_mutex_unlock(&mLock);
    return r;
}

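// Inserts an Assembly under keyBase, evicting least-recently-used entries
// until the new code fits within mCacheSize, then flushes the instruction
// cache over the cached range so the CPU never executes stale bytes.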
int CodeCache::cache( const AssemblyKeyBase& keyBase,
                      const sp<Assembly>& assembly)
{
    pthread_mutex_lock(&mLock);

    const ssize_t assemblySize = assembly->size();
    while (mCacheInUse + assemblySize > mCacheSize) {
        // evict the least-recently-used entry
        size_t lru = 0;
        size_t count = mCacheData.size();
        for (size_t i=0 ; i<count ; i++) {
            const cache_entry_t& e = mCacheData.valueAt(i);
            if (e.when < mCacheData.valueAt(lru).when) {
                lru = i;
            }
        }
        const cache_entry_t& e = mCacheData.valueAt(lru);
        mCacheInUse -= e.entry->size();
        mCacheData.removeItemsAt(lru);
    }

    ssize_t err = mCacheData.add(key_t(keyBase), cache_entry_t(assembly, mWhen));
    if (err >= 0) {
        mCacheInUse += assemblySize;
        mWhen++;
        // synchronize the data and instruction caches over the new code
        char* base = reinterpret_cast<char*>(assembly->base());
        char* curr = reinterpret_cast<char*>(base + assembly->size());
        __builtin___clear_cache(base, curr);
    }

    pthread_mutex_unlock(&mLock);
    return err;
}

// ----------------------------------------------------------------------------

} // namespace android