/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */

//#define LOG_NDEBUG 0

#include "BlobCache.h"

#include <android-base/properties.h>
#include <errno.h>
#include <inttypes.h>
#include <log/log.h>

#include <chrono>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize)
      : mMaxTotalSize(maxTotalSize),
        mMaxKeySize(maxKeySize),
        mMaxValueSize(maxValueSize),
        mTotalSize(0) {
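    // Seed the PRNG that clean() uses to pick random eviction victims; the
    // seed only needs to differ between runs, it is not security-sensitive.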
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    srand(now);
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    ALOGV("initializing random seed using %lld", (unsigned long long)now);
}

BlobCache::InsertResult BlobCache::set(const void* key, size_t keySize, const void* value,
                                       size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)", keySize,
              mMaxKeySize);
        return InsertResult::kKeyTooBig;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)", valueSize,
              mMaxValueSize);
        return InsertResult::kValueTooBig;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
              "large: %zu (limit: %zu)",
              keySize + valueSize, mMaxTotalSize);
        return InsertResult::kCombinedTooBig;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return InsertResult::kInvalidKeySize;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return InsertResult::kInvalidValueSize;
    }

    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);

    bool didClean = false;
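    // Find the insertion point and add or update the entry. If the total-size
    // limit would be exceeded, clean() evicts entries and the loop retries
    // once; after a clean the cache is no longer cleanable, so the retry
    // either succeeds or returns kNotEnoughSpace.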
    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
        if (index == mCacheEntries.end() || cacheEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newTotalSize = mTotalSize + keySize + valueSize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    didClean = true;
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                          "total cache size limit would be exceeded: %zu "
                          "(limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    return InsertResult::kNotEnoughSpace;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value", keySize,
                  valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    didClean = true;
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                          "size limit would be exceeded: %zu (limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    return InsertResult::kNotEnoughSpace;
                }
            }
            index->setValue(valueBlob);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                  "value",
                  keySize, valueSize);
        }
        return didClean ? InsertResult::kDidClean : InsertResult::kInserted;
    }
}

size_t BlobCache::get(const void* key, size_t keySize, void* value, size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)", keySize,
              mMaxKeySize);
        return 0;
    }
    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);
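    // mCacheEntries is kept sorted by key, so a binary search locates the entry.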
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
    if (index == mCacheEntries.end() || cacheEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        return 0;
    }

    // The key was found. Return the value if the caller's buffer is large
    // enough.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    if (valueBlobSize <= valueSize) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(value, valueBlob->getData(), valueBlobSize);
    } else {
        ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)", valueSize,
              valueBlobSize);
    }
    return valueBlobSize;
}
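
// Round a size up to the next multiple of 4; the flattened header and every
// flattened entry are kept 4-byte aligned.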
static inline size_t align4(size_t size) {
    return (size + 3) & ~3;
}

size_t BlobCache::getFlattenedSize() const {
    auto buildId = base::GetProperty("ro.build.id", "");
    size_t size = align4(sizeof(Header) + buildId.size());
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return 0;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    auto buildId = base::GetProperty("ro.build.id", "");
    header->mBuildIdLength = buildId.size();
    memcpy(header->mBuildId, buildId.c_str(), header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }
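
        // Each entry is written as an EntryHeader immediately followed by the
        // key bytes and then the value bytes, padded to a 4-byte boundary.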
        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // We have padding bytes. Those will get written to storage, and contribute to the CRC,
            // so make sure we zero them to have reproducible results.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}

int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    auto buildId = base::GetProperty("ro.build.id", "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
        header->mDeviceVersion != blobCacheDeviceVersion ||
        buildId.size() != header->mBuildIdLength ||
        strncmp(buildId.c_str(), header->mBuildId, buildId.size())) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(&byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
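        // Re-insert through set() so the size limits are enforced and the
        // sorted order of mCacheEntries is rebuilt.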
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}
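
// Per-instance PRNG used to pick eviction victims in clean(). nrand48() keeps
// its state in mRandState, so this cache does not perturb the global rand()
// sequence (except on Windows, where rand() is the fallback).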
long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

void BlobCache::clean() {
    // Remove a random cache entry until the total cache size gets below half
    // the maximum total cache size.
    while (mTotalSize > mMaxTotalSize / 2) {
        size_t i = size_t(blob_random() % (mCacheEntries.size()));
        const CacheEntry& entry(mCacheEntries[i]);
        mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
        mCacheEntries.erase(mCacheEntries.begin() + i);
    }
}
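
// The cache is cleanable once it is more than half full; clean() then evicts
// entries until the total size is back at or below half of mMaxTotalSize.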
bool BlobCache::isCleanable() const {
    return mTotalSize > mMaxTotalSize / 2;
}
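
// When copyData is true the Blob owns a heap copy of the data (freed in the
// destructor); when false it merely references the caller's buffer, which is
// how the temporary lookup keys in set() and get() are built.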
BlobCache::Blob::Blob(const void* data, size_t size, bool copyData)
      : mData(copyData ? malloc(size) : data), mSize(size), mOwnsData(copyData) {
    if (data != nullptr && copyData) {
        memcpy(const_cast<void*>(mData), data, size);
    }
}

BlobCache::Blob::~Blob() {
    if (mOwnsData) {
        free(const_cast<void*>(mData));
    }
}
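
// Blobs are ordered by size first and by memcmp() of their contents second;
// CacheEntry::operator< builds on this ordering to keep mCacheEntries sorted.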
bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry() {}

BlobCache::CacheEntry::CacheEntry(const std::shared_ptr<Blob>& key,
                                  const std::shared_ptr<Blob>& value)
      : mKey(key), mValue(value) {}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce) : mKey(ce.mKey), mValue(ce.mValue) {}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

} // namespace android