/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdint.h>
#include <sys/types.h>

#include <atomic>
#include <thread>
#include <vector>

#include <android-base/logging.h>
#include <android-base/scopeguard.h>
#include <android-base/unique_fd.h>
#include <cutils/ashmem.h>
#include <fmq/AidlMessageQueue.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>

#include "fuzzer/FuzzedDataProvider.h"

using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
using android::hardware::kSynchronizedReadWrite;
using android::hardware::kUnsynchronizedWrite;

typedef int32_t payload_t;

// The reader will wait for 10 ms
static constexpr int kBlockingTimeoutNs = 10000000;

/*
 * MessageQueueBase.h contains asserts when memory allocation fails. So we need
 * to set a reasonable limit if we want to avoid those asserts.
 */
static constexpr size_t kAlignment = 8;
static constexpr size_t kMaxNumElements = PAGE_SIZE * 10 / sizeof(payload_t) - kAlignment + 1;

/*
 * The read counter can be found in the shared memory 16 bytes before the start
 * of the ring buffer.
 */
static constexpr int kReadCounterOffsetBytes = 16;
/*
 * The write counter can be found in the shared memory 8 bytes before the start
 * of the ring buffer.
 */
static constexpr int kWriteCounterOffsetBytes = 8;

static constexpr int kMaxNumSyncReaders = 1;
static constexpr int kMaxNumUnsyncReaders = 5;
static constexpr int kMaxDataPerReader = 1000;

typedef android::AidlMessageQueue<payload_t, SynchronizedReadWrite> AidlMessageQueueSync;
typedef android::AidlMessageQueue<payload_t, UnsynchronizedWrite> AidlMessageQueueUnsync;
typedef android::hardware::MessageQueue<payload_t, kSynchronizedReadWrite> MessageQueueSync;
typedef android::hardware::MessageQueue<payload_t, kUnsynchronizedWrite> MessageQueueUnsync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, SynchronizedReadWrite>
        AidlMQDescSync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, UnsynchronizedWrite>
        AidlMQDescUnsync;
typedef android::hardware::MQDescriptorSync<payload_t> MQDescSync;
typedef android::hardware::MQDescriptorUnsync<payload_t> MQDescUnsync;

static inline uint64_t* getCounterPtr(payload_t* start, int byteOffset) {
    return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - byteOffset);
}
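
/*
 * Layout sketch (an inference from the offsets above, not authoritative): when
 * the queue allocates its own ashmem region, the read/write counters sit in
 * the same mapping immediately before the ring buffer, roughly:
 *
 *   ... | read counter (8 bytes) | write counter (8 bytes) | ring buffer ...
 *         ^ ring - 16              ^ ring - 8                ^ first element
 *
 * getCounterPtr() relies on that adjacency so the reader/writer loops below
 * can deliberately corrupt a counter and exercise the queue's bounds checks.
 * With a user-supplied fd the counters live elsewhere, which is why the loops
 * skip the corruption step in that case.
 */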

template <typename Queue, typename Desc>
void reader(const Desc& desc, std::vector<uint8_t> readerData, bool userFd) {
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    payload_t* ring = nullptr;
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = fdp.ConsumeIntegralInRange<size_t>(0, kMaxNumElements);
        if (!readMq.beginRead(numElements, &tx)) {
            continue;
        }
        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();

        // the ring buffer is only next to the read/write counters when there is
        // no user supplied fd
        if (!userFd) {
            if (ring == nullptr) {
                ring = firstStart;
            }
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* writeCounter = getCounterPtr(ring, kWriteCounterOffsetBytes);
                *writeCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        (void)std::to_string(*firstStart);

        readMq.commitRead(numElements);
    }
}

template <typename Queue, typename Desc>
void readerBlocking(const Desc& desc, std::vector<uint8_t>& readerData,
                    std::atomic<size_t>& readersNotFinished,
                    std::atomic<size_t>& writersNotFinished) {
    android::base::ScopeGuard guard([&readersNotFinished]() { readersNotFinished--; });
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    do {
        size_t count = fdp.remaining_bytes()
                               ? fdp.ConsumeIntegralInRange<size_t>(1, readMq.getQuantumCount())
                               : 1;
        std::vector<payload_t> data;
        data.resize(count);
        readMq.readBlocking(data.data(), count, kBlockingTimeoutNs);
    } while (fdp.remaining_bytes() > sizeof(size_t) && writersNotFinished > 0);
}

// Can't use blocking calls with Unsync queues (there is a static_assert)
template <>
void readerBlocking<AidlMessageQueueUnsync, AidlMQDescUnsync>(const AidlMQDescUnsync&,
                                                              std::vector<uint8_t>&,
                                                              std::atomic<size_t>&,
                                                              std::atomic<size_t>&) {}
template <>
void readerBlocking<MessageQueueUnsync, MQDescUnsync>(const MQDescUnsync&, std::vector<uint8_t>&,
                                                      std::atomic<size_t>&,
                                                      std::atomic<size_t>&) {}

template <typename Queue>
void writer(Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) {
    payload_t* ring = nullptr;
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = 1;
        if (!writeMq.beginWrite(numElements, &tx)) {
            // need to consume something so we don't end up looping forever
            fdp.ConsumeIntegral<uint8_t>();
            continue;
        }
        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();

        // the ring buffer is only next to the read/write counters when there is
        // no user supplied fd
        if (!userFd) {
            if (ring == nullptr) {
                ring = firstStart;
            }
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* readCounter = getCounterPtr(ring, kReadCounterOffsetBytes);
                *readCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        *firstStart = fdp.ConsumeIntegral<payload_t>();

        writeMq.commitWrite(numElements);
    }
}

template <typename Queue>
void writerBlocking(Queue& writeMq, FuzzedDataProvider& fdp,
                    std::atomic<size_t>& writersNotFinished,
                    std::atomic<size_t>& readersNotFinished) {
    android::base::ScopeGuard guard([&writersNotFinished]() { writersNotFinished--; });
    while (fdp.remaining_bytes() > sizeof(size_t) && readersNotFinished > 0) {
        size_t count = fdp.ConsumeIntegralInRange<size_t>(1, writeMq.getQuantumCount());
        std::vector<payload_t> data;
        for (int i = 0; i < count; i++) {
            data.push_back(fdp.ConsumeIntegral<payload_t>());
        }
        writeMq.writeBlocking(data.data(), count, kBlockingTimeoutNs);
    }
}

// Can't use blocking calls with Unsync queues (there is a static_assert)
template <>
void writerBlocking<AidlMessageQueueUnsync>(AidlMessageQueueUnsync&, FuzzedDataProvider&,
                                            std::atomic<size_t>&, std::atomic<size_t>&) {}
template <>
void writerBlocking<MessageQueueUnsync>(MessageQueueUnsync&, FuzzedDataProvider&,
                                        std::atomic<size_t>&, std::atomic<size_t>&) {}
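
/*
 * The drivers below share one shape: the fuzz input is split into a writer
 * stream and per-reader streams, the writer runs on the fuzzer thread while
 * each reader runs on its own std::thread, and the readersNotFinished /
 * writersNotFinished atomics let the blocking variants bail out instead of
 * waiting on a peer that has already consumed its data (readBlocking and
 * writeBlocking also give up after kBlockingTimeoutNs). fuzzAidlWithReaders
 * drives the AIDL AidlMessageQueue (descriptor via dupeDesc()) and
 * fuzzHidlWithReaders drives the HIDL MessageQueue (descriptor via getDesc());
 * apart from that the two are intentionally identical.
 */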

template <typename Queue, typename Desc>
void fuzzAidlWithReaders(std::vector<uint8_t>& writerData,
                         std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
    FuzzedDataProvider fdp(&writerData[0], writerData.size());
    bool evFlag = blocking || fdp.ConsumeBool();
    android::base::unique_fd dataFd;
    size_t bufferSize = 0;
    size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
    bool userFd = fdp.ConsumeBool();
    if (userFd) {
        // run test with our own data region
        bufferSize = numElements * sizeof(payload_t);
        dataFd.reset(::ashmem_create_region("SyncReadWrite", bufferSize));
    }
    Queue writeMq(numElements, evFlag, std::move(dataFd), bufferSize);
    if (!writeMq.isValid()) {
        LOG(ERROR) << "AIDL write mq invalid";
        return;
    }
    const auto desc = writeMq.dupeDesc();
    CHECK(desc.handle.fds[0].get() != -1);

    std::atomic<size_t> readersNotFinished = readerData.size();
    std::atomic<size_t> writersNotFinished = 1;
    std::vector<std::thread> readers;
    for (int i = 0; i < readerData.size(); i++) {
        if (blocking) {
            readers.emplace_back(readerBlocking<Queue, Desc>, std::ref(desc),
                                 std::ref(readerData[i]), std::ref(readersNotFinished),
                                 std::ref(writersNotFinished));
        } else {
            readers.emplace_back(reader<Queue, Desc>, std::ref(desc), std::ref(readerData[i]),
                                 userFd);
        }
    }

    if (blocking) {
        writerBlocking(writeMq, fdp, writersNotFinished, readersNotFinished);
    } else {
        writer(writeMq, fdp, userFd);
    }

    for (auto& reader : readers) {
        reader.join();
    }
}

template <typename Queue, typename Desc>
void fuzzHidlWithReaders(std::vector<uint8_t>& writerData,
                         std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
    FuzzedDataProvider fdp(&writerData[0], writerData.size());
    bool evFlag = blocking || fdp.ConsumeBool();
    android::base::unique_fd dataFd;
    size_t bufferSize = 0;
    size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
    bool userFd = fdp.ConsumeBool();
    if (userFd) {
        // run test with our own data region
        bufferSize = numElements * sizeof(payload_t);
        dataFd.reset(::ashmem_create_region("SyncReadWrite", bufferSize));
    }
    Queue writeMq(numElements, evFlag, std::move(dataFd), bufferSize);
    if (!writeMq.isValid()) {
        LOG(ERROR) << "HIDL write mq invalid";
        return;
    }
    const auto desc = writeMq.getDesc();
    CHECK(desc->isHandleValid());

    std::atomic<size_t> readersNotFinished = readerData.size();
    std::atomic<size_t> writersNotFinished = 1;
    std::vector<std::thread> readers;
    for (int i = 0; i < readerData.size(); i++) {
        if (blocking) {
            readers.emplace_back(readerBlocking<Queue, Desc>, std::ref(*desc),
                                 std::ref(readerData[i]), std::ref(readersNotFinished),
                                 std::ref(writersNotFinished));
        } else {
            readers.emplace_back(reader<Queue, Desc>, std::ref(*desc), std::ref(readerData[i]),
                                 userFd);
        }
    }

    if (blocking) {
        writerBlocking(writeMq, fdp, writersNotFinished, readersNotFinished);
    } else {
        writer(writeMq, fdp, userFd);
    }

    for (auto& reader : readers) {
        reader.join();
    }
}

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    if (size < 1 || size > 50000) {
        return 0;
    }
    FuzzedDataProvider fdp(data, size);

    bool fuzzSync = fdp.ConsumeBool();
    std::vector<std::vector<uint8_t>> readerData;
    uint8_t numReaders = fuzzSync ? fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumSyncReaders)
                                  : fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumUnsyncReaders);
    for (int i = 0; i < numReaders; i++) {
        readerData.emplace_back(fdp.ConsumeBytes<uint8_t>(kMaxDataPerReader));
    }
    bool fuzzBlocking = fdp.ConsumeBool();
    std::vector<uint8_t> writerData = fdp.ConsumeRemainingBytes<uint8_t>();
    if (fuzzSync) {
        fuzzHidlWithReaders<MessageQueueSync, MQDescSync>(writerData, readerData, fuzzBlocking);
        fuzzAidlWithReaders<AidlMessageQueueSync, AidlMQDescSync>(writerData, readerData,
                                                                  fuzzBlocking);
    } else {
        fuzzHidlWithReaders<MessageQueueUnsync, MQDescUnsync>(writerData, readerData, false);
        fuzzAidlWithReaders<AidlMessageQueueUnsync, AidlMQDescUnsync>(writerData, readerData,
                                                                      false);
    }

    return 0;
}
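
/*
 * Minimal usage sketch, kept out of the fuzz path: a hedged illustration of
 * the sync AIDL queue round trip that the code above stresses. The function
 * name and the queue size of 8 are arbitrary choices for illustration only.
 */
[[maybe_unused]] static void exampleSyncQueueRoundTrip() {
    AidlMessageQueueSync writeMq(8 /* numElementsInQueue */);
    if (!writeMq.isValid()) {
        return;
    }
    // A second queue object built from the duped descriptor acts as the reader.
    AidlMessageQueueSync readMq(writeMq.dupeDesc());
    payload_t in = 42;
    payload_t out = 0;
    if (writeMq.write(&in) && readMq.read(&out)) {
        CHECK(out == in);
    }
}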