1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "C2DmaBufAllocator"
19 
20 #include <BufferAllocator/BufferAllocator.h>
21 #include <C2Buffer.h>
22 #include <C2Debug.h>
23 #include <C2DmaBufAllocator.h>
24 #include <C2ErrnoUtils.h>
25 
26 #include <linux/ion.h>
27 #include <sys/mman.h>
28 #include <unistd.h>  // getpagesize, size_t, close, dup
29 #include <utils/Log.h>
30 
31 #include <list>
32 
33 #include <android-base/properties.h>
34 
35 namespace android {
36 
namespace {
    // Number of (usage, capacity) -> (heap name, flags) translations retained
    // in the usage-mapper cache before the least-recently-used one is evicted.
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;

    // max padding after ion/dmabuf allocations in bytes
    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}
43 
44 /* =========================== BUFFER HANDLE =========================== */
45 /**
46  * Buffer handle
47  *
48  * Stores dmabuf fd & metadata
49  *
50  * This handle will not capture mapped fd-s as updating that would require a
51  * global mutex.
52  */
53 
54 struct C2HandleBuf : public C2Handle {
C2HandleBufandroid::C2HandleBuf55     C2HandleBuf(int bufferFd, size_t size)
56         : C2Handle(cHeader),
57           mFds{bufferFd},
58           mInts{int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic} {}
59 
60     static bool IsValid(const C2Handle* const o);
61 
bufferFdandroid::C2HandleBuf62     int bufferFd() const { return mFds.mBuffer; }
sizeandroid::C2HandleBuf63     size_t size() const {
64         return size_t(unsigned(mInts.mSizeLo)) | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
65     }
66 
67    protected:
68     struct {
69         int mBuffer;  // dmabuf fd
70     } mFds;
71     struct {
72         int mSizeLo;  // low 32-bits of size
73         int mSizeHi;  // high 32-bits of size
74         int mMagic;
75     } mInts;
76 
77    private:
78     typedef C2HandleBuf _type;
79     enum {
80         kMagic = '\xc2io\x00',
81         numFds = sizeof(mFds) / sizeof(int),
82         numInts = sizeof(mInts) / sizeof(int),
83         version = sizeof(C2Handle)
84     };
85     // constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
86     const static C2Handle cHeader;
87 };
88 
// Reference header shared by every C2HandleBuf; IsValid() byte-compares
// incoming handles against this pattern.
const C2Handle C2HandleBuf::cHeader = {
        C2HandleBuf::version, C2HandleBuf::numFds, C2HandleBuf::numInts, {}};
91 
92 // static
IsValid(const C2Handle * const o)93 bool C2HandleBuf::IsValid(const C2Handle* const o) {
94     if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
95         return false;
96     }
97     const C2HandleBuf* other = static_cast<const C2HandleBuf*>(o);
98     return other->mInts.mMagic == kMagic;
99 }
100 
101 /* =========================== DMABUF ALLOCATION =========================== */
102 class C2DmaBufAllocation : public C2LinearAllocation {
103    public:
104     /* Interface methods */
105     virtual c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
106                             void** addr /* nonnull */) override;
107     virtual c2_status_t unmap(void* addr, size_t size, C2Fence* fenceFd) override;
108     virtual ~C2DmaBufAllocation() override;
109     virtual const C2Handle* handle() const override;
110     virtual id_t getAllocatorId() const override;
111     virtual bool equals(const std::shared_ptr<C2LinearAllocation>& other) const override;
112 
113     // internal methods
114 
115     /**
116       * Constructs an allocation via a new allocation.
117       *
118       * @param alloc     allocator
119       * @param allocSize size used for the allocator
120       * @param capacity  capacity advertised to the client
121       * @param heap_name name of the dmabuf heap (device)
122       * @param flags     flags
123       * @param id        allocator id
124       */
125     C2DmaBufAllocation(BufferAllocator& alloc, size_t allocSize, size_t capacity,
126                        C2String heap_name, unsigned flags, C2Allocator::id_t id);
127 
128     /**
129       * Constructs an allocation by wrapping an existing allocation.
130       *
131       * @param size    capacity advertised to the client
132       * @param shareFd dmabuf fd of the wrapped allocation
133       * @param id      allocator id
134       */
135     C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id);
136 
137     c2_status_t status() const;
138 
139    protected:
mapInternal(size_t mapSize,size_t mapOffset,size_t alignmentBytes,int prot,int flags,void ** base,void ** addr)140     virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
141                                     int prot, int flags, void** base, void** addr) {
142         c2_status_t err = C2_OK;
143         *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
144         ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
145               "returned (%d)",
146               mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
147         if (*base == MAP_FAILED) {
148             *base = *addr = nullptr;
149             err = c2_map_errno<EINVAL>(errno);
150         } else {
151             *addr = (uint8_t*)*base + alignmentBytes;
152         }
153         return err;
154     }
155 
156     C2Allocator::id_t mId;
157     C2HandleBuf mHandle;
158     c2_status_t mInit;
159     struct Mapping {
160         void* addr;
161         size_t alignmentBytes;
162         size_t size;
163     };
164     std::list<Mapping> mMappings;
165 
166     // TODO: we could make this encapsulate shared_ptr and copiable
167     C2_DO_NOT_COPY(C2DmaBufAllocation);
168 };
169 
map(size_t offset,size_t size,C2MemoryUsage usage,C2Fence * fence,void ** addr)170 c2_status_t C2DmaBufAllocation::map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
171                                     void** addr) {
172     (void)fence;  // TODO: wait for fence
173     *addr = nullptr;
174     if (!mMappings.empty()) {
175         ALOGV("multiple map");
176         // TODO: technically we should return DUPLICATE here, but our block views
177         // don't actually unmap, so we end up remapping the buffer multiple times.
178         //
179         // return C2_DUPLICATE;
180     }
181     if (size == 0) {
182         return C2_BAD_VALUE;
183     }
184 
185     int prot = PROT_NONE;
186     int flags = MAP_SHARED;
187     if (usage.expected & C2MemoryUsage::CPU_READ) {
188         prot |= PROT_READ;
189     }
190     if (usage.expected & C2MemoryUsage::CPU_WRITE) {
191         prot |= PROT_WRITE;
192     }
193 
194     size_t alignmentBytes = offset % PAGE_SIZE;
195     size_t mapOffset = offset - alignmentBytes;
196     size_t mapSize = size + alignmentBytes;
197     Mapping map = {nullptr, alignmentBytes, mapSize};
198 
199     c2_status_t err =
200             mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
201     if (map.addr) {
202         mMappings.push_back(map);
203     }
204     return err;
205 }
206 
unmap(void * addr,size_t size,C2Fence * fence)207 c2_status_t C2DmaBufAllocation::unmap(void* addr, size_t size, C2Fence* fence) {
208     if (mMappings.empty()) {
209         ALOGD("tried to unmap unmapped buffer");
210         return C2_NOT_FOUND;
211     }
212     for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
213         if (addr != (uint8_t*)it->addr + it->alignmentBytes ||
214             size + it->alignmentBytes != it->size) {
215             continue;
216         }
217         int err = munmap(it->addr, it->size);
218         if (err != 0) {
219             ALOGD("munmap failed");
220             return c2_map_errno<EINVAL>(errno);
221         }
222         if (fence) {
223             *fence = C2Fence();  // not using fences
224         }
225         (void)mMappings.erase(it);
226         ALOGV("successfully unmapped: %d", mHandle.bufferFd());
227         return C2_OK;
228     }
229     ALOGD("unmap failed to find specified map");
230     return C2_BAD_VALUE;
231 }
232 
status() const233 c2_status_t C2DmaBufAllocation::status() const {
234     return mInit;
235 }
236 
getAllocatorId() const237 C2Allocator::id_t C2DmaBufAllocation::getAllocatorId() const {
238     return mId;
239 }
240 
equals(const std::shared_ptr<C2LinearAllocation> & other) const241 bool C2DmaBufAllocation::equals(const std::shared_ptr<C2LinearAllocation>& other) const {
242     if (!other || other->getAllocatorId() != getAllocatorId()) {
243         return false;
244     }
245     // get user handle to compare objects
246     std::shared_ptr<C2DmaBufAllocation> otherAsBuf =
247             std::static_pointer_cast<C2DmaBufAllocation>(other);
248     return mHandle.bufferFd() == otherAsBuf->mHandle.bufferFd();
249 }
250 
handle() const251 const C2Handle* C2DmaBufAllocation::handle() const {
252     return &mHandle;
253 }
254 
~C2DmaBufAllocation()255 C2DmaBufAllocation::~C2DmaBufAllocation() {
256     if (!mMappings.empty()) {
257         ALOGD("Dangling mappings!");
258         for (const Mapping& map : mMappings) {
259             int err = munmap(map.addr, map.size);
260             if (err) ALOGD("munmap failed");
261         }
262     }
263     if (mInit == C2_OK) {
264         native_handle_close(&mHandle);
265     }
266 }
267 
C2DmaBufAllocation(BufferAllocator & alloc,size_t allocSize,size_t capacity,C2String heap_name,unsigned flags,C2Allocator::id_t id)268 C2DmaBufAllocation::C2DmaBufAllocation(BufferAllocator& alloc, size_t allocSize, size_t capacity,
269                                        C2String heap_name, unsigned flags, C2Allocator::id_t id)
270     : C2LinearAllocation(capacity), mHandle(-1, 0) {
271     int bufferFd = -1;
272     int ret = 0;
273 
274     bufferFd = alloc.Alloc(heap_name, allocSize, flags);
275     if (bufferFd < 0) {
276         ret = bufferFd;
277     }
278 
279     // this may be a non-working handle if bufferFd is negative
280     mHandle = C2HandleBuf(bufferFd, capacity);
281     mId = id;
282     mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
283 }
284 
C2DmaBufAllocation(size_t size,int shareFd,C2Allocator::id_t id)285 C2DmaBufAllocation::C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id)
286     : C2LinearAllocation(size), mHandle(-1, 0) {
287     mHandle = C2HandleBuf(shareFd, size);
288     mId = id;
289     mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(0));
290 }
291 
292 /* =========================== DMABUF ALLOCATOR =========================== */
C2DmaBufAllocator(id_t id)293 C2DmaBufAllocator::C2DmaBufAllocator(id_t id) : mInit(C2_OK) {
294     C2MemoryUsage minUsage = {0, 0};
295     C2MemoryUsage maxUsage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
296     Traits traits = {"android.allocator.dmabuf", id, LINEAR, minUsage, maxUsage};
297     mTraits = std::make_shared<Traits>(traits);
298 }
299 
getId() const300 C2Allocator::id_t C2DmaBufAllocator::getId() const {
301     std::lock_guard<std::mutex> lock(mUsageMapperLock);
302     return mTraits->id;
303 }
304 
getName() const305 C2String C2DmaBufAllocator::getName() const {
306     std::lock_guard<std::mutex> lock(mUsageMapperLock);
307     return mTraits->name;
308 }
309 
getTraits() const310 std::shared_ptr<const C2Allocator::Traits> C2DmaBufAllocator::getTraits() const {
311     std::lock_guard<std::mutex> lock(mUsageMapperLock);
312     return mTraits;
313 }
314 
setUsageMapper(const UsageMapperFn & mapper __unused,uint64_t minUsage,uint64_t maxUsage,uint64_t blockSize)315 void C2DmaBufAllocator::setUsageMapper(const UsageMapperFn& mapper __unused, uint64_t minUsage,
316                                        uint64_t maxUsage, uint64_t blockSize) {
317     std::lock_guard<std::mutex> lock(mUsageMapperLock);
318     mUsageMapperCache.clear();
319     mUsageMapperLru.clear();
320     mUsageMapper = mapper;
321     Traits traits = {mTraits->name, mTraits->id, LINEAR, C2MemoryUsage(minUsage),
322                      C2MemoryUsage(maxUsage)};
323     mTraits = std::make_shared<Traits>(traits);
324     mBlockSize = blockSize;
325 }
326 
operator ()(const MapperKey & k) const327 std::size_t C2DmaBufAllocator::MapperKeyHash::operator()(const MapperKey& k) const {
328     return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
329 }
330 
mapUsage(C2MemoryUsage usage,size_t capacity,C2String * heap_name,unsigned * flags)331 c2_status_t C2DmaBufAllocator::mapUsage(C2MemoryUsage usage, size_t capacity, C2String* heap_name,
332                                         unsigned* flags) {
333     std::lock_guard<std::mutex> lock(mUsageMapperLock);
334     c2_status_t res = C2_OK;
335     // align capacity
336     capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
337     MapperKey key = std::make_pair(usage.expected, capacity);
338     auto entry = mUsageMapperCache.find(key);
339     if (entry == mUsageMapperCache.end()) {
340         if (mUsageMapper) {
341             res = mUsageMapper(usage, capacity, heap_name, flags);
342         } else {
343             if (C2DmaBufAllocator::system_uncached_supported() &&
344                 !(usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)))
345                 *heap_name = "system-uncached";
346             else
347                 *heap_name = "system";
348             *flags = 0;
349             res = C2_NO_INIT;
350         }
351         // add usage to cache
352         MapperValue value = std::make_tuple(*heap_name, *flags, res);
353         mUsageMapperLru.emplace_front(key, value);
354         mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
355         if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
356             // remove LRU entry
357             MapperKey lruKey = mUsageMapperLru.front().first;
358             mUsageMapperCache.erase(lruKey);
359             mUsageMapperLru.pop_back();
360         }
361     } else {
362         // move entry to MRU
363         mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
364         const MapperValue& value = entry->second->second;
365         std::tie(*heap_name, *flags, res) = value;
366     }
367     return res;
368 }
369 
newLinearAllocation(uint32_t capacity,C2MemoryUsage usage,std::shared_ptr<C2LinearAllocation> * allocation)370 c2_status_t C2DmaBufAllocator::newLinearAllocation(
371         uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation>* allocation) {
372     if (allocation == nullptr) {
373         return C2_BAD_VALUE;
374     }
375 
376     allocation->reset();
377     if (mInit != C2_OK) {
378         return mInit;
379     }
380 
381     C2String heap_name;
382     unsigned flags = 0;
383     c2_status_t ret = mapUsage(usage, capacity, &heap_name, &flags);
384     if (ret && ret != C2_NO_INIT) {
385         return ret;
386     }
387 
388     // TODO: should we pad before mapping usage?
389 
390     // NOTE: read this property directly from the property as this code has to run on
391     // Android Q, but the sysprop was only introduced in Android S.
392     static size_t sPadding =
393         base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
394     if (sPadding > SIZE_MAX - capacity) {
395         // size would overflow
396         ALOGD("dmabuf_alloc: size #%x cannot accommodate padding #%zx", capacity, sPadding);
397         return C2_NO_MEMORY;
398     }
399 
400     size_t allocSize = (size_t)capacity + sPadding;
401     // TODO: should we align allocation size to mBlockSize to reflect the true allocation size?
402     std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
403             mBufferAllocator, allocSize, allocSize - sPadding, heap_name, flags, getId());
404     ret = alloc->status();
405     if (ret == C2_OK) {
406         *allocation = alloc;
407     }
408     return ret;
409 }
410 
priorLinearAllocation(const C2Handle * handle,std::shared_ptr<C2LinearAllocation> * allocation)411 c2_status_t C2DmaBufAllocator::priorLinearAllocation(
412         const C2Handle* handle, std::shared_ptr<C2LinearAllocation>* allocation) {
413     *allocation = nullptr;
414     if (mInit != C2_OK) {
415         return mInit;
416     }
417 
418     if (!C2HandleBuf::IsValid(handle)) {
419         return C2_BAD_VALUE;
420     }
421 
422     // TODO: get capacity and validate it
423     const C2HandleBuf* h = static_cast<const C2HandleBuf*>(handle);
424     std::shared_ptr<C2DmaBufAllocation> alloc =
425             std::make_shared<C2DmaBufAllocation>(h->size(), h->bufferFd(), getId());
426     c2_status_t ret = alloc->status();
427     if (ret == C2_OK) {
428         *allocation = alloc;
429         native_handle_delete(
430                 const_cast<native_handle_t*>(reinterpret_cast<const native_handle_t*>(handle)));
431     }
432     return ret;
433 }
434 
435 // static
CheckHandle(const C2Handle * const o)436 bool C2DmaBufAllocator::CheckHandle(const C2Handle* const o) {
437     return C2HandleBuf::IsValid(o);
438 }
439 
440 }  // namespace android
441