/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2AllocatorIon"
#include <utils/Log.h>

#include <list>
#include <mutex> // std::mutex, std::lock_guard (used directly below)

#include <ion/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup

#include <C2AllocatorIon.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2ErrnoUtils.h>
#include <C2HandleIonInternal.h>

#include <android-base/properties.h>

namespace android {

namespace {
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;

    // max padding after ion/dmabuf allocations in bytes
    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}

/* size_t <=> int(lo), int(hi) conversions */
constexpr inline int size2intLo(size_t s) {
    return int(s & 0xFFFFFFFF);
}

constexpr inline int size2intHi(size_t s) {
    // cast to uint64_t as size_t may be 32 bits wide
    return int((uint64_t(s) >> 32) & 0xFFFFFFFF);
}

constexpr inline size_t ints2size(int intLo, int intHi) {
    // convert in 2 stages to 64 bits as intHi may be negative
    return size_t(unsigned(intLo)) | size_t(uint64_t(unsigned(intHi)) << 32);
}
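
// Illustrative compile-time check (not in the original source): the helpers above are
// constexpr, so a size_t split into (lo, hi) ints must reassemble to the original value,
// even when the high int is negative (as it is for SIZE_MAX on 64-bit builds).
static_assert(ints2size(size2intLo(SIZE_MAX), size2intHi(SIZE_MAX)) == SIZE_MAX,
              "size_t <=> int(lo)/int(hi) conversion must round-trip");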

/* ========================================= ION HANDLE ======================================== */
/**
 * ION handle
 *
 * There can be only one ion client per process; this is captured in the ion fd that is passed
 * to the constructor, but it should be managed by the ion buffer allocator/mapper.
 *
 * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
 * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
 * a refcount.
 *
 * This handle will not capture mapped fds, as updating those would require a global mutex.
 */

const C2Handle C2HandleIon::cHeader = {
    C2HandleIon::version,
    C2HandleIon::numFds,
    C2HandleIon::numInts,
    {}
};

// static
bool C2HandleIon::IsValid(const C2Handle * const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleIon *other = static_cast<const C2HandleIon*>(o);
    return other->mInts.mMagic == kMagic;
}
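
// Illustrative check (not part of the original source; the C2HandleIon(bufferFd, size)
// constructor is the one used by Impl below):
//
//   C2HandleIon handle(bufferFd, capacity);
//   assert(C2HandleIon::IsValid(&handle));  // header fields and magic match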

// TODO: is the dup of an ion fd identical to ion_share?

/* ======================================= ION ALLOCATION ====================================== */
class C2AllocationIon : public C2LinearAllocation {
public:
    /* Interface methods */
    virtual c2_status_t map(
            size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence,
            void **addr /* nonnull */) override;
    virtual c2_status_t unmap(void *addr, size_t size, C2Fence *fenceFd) override;
    virtual ~C2AllocationIon() override;
    virtual const C2Handle *handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const override;

    // internal methods
    C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
    C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id);

    c2_status_t status() const;

protected:
    class Impl;
    class ImplV2;
    Impl *mImpl;
    // TODO: we could make this encapsulate a shared_ptr and be copyable
    C2_DO_NOT_COPY(C2AllocationIon);
};

class C2AllocationIon::Impl {
protected:
    /**
     * Constructs an ion allocation.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param buffer    ion buffer user handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, C2Allocator::id_t id, int err)
        : mIonFd(ionFd),
          mHandle(bufferFd, capacity),
          mBuffer(buffer),
          mId(id),
          mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
          mMapFd(-1) {
        if (mInit != C2_OK) {
            // close ionFd now on error
            if (mIonFd >= 0) {
                close(mIonFd);
                mIonFd = -1;
            }
            // C2_CHECK(bufferFd < 0);
            // C2_CHECK(buffer < 0);
        }
    }

public:
    /**
     * Constructs an ion allocation by importing a shared buffer fd.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object)
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * import failed.
     */
    static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);

    /**
     * Constructs an ion allocation by allocating an ion buffer.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param size      size of allocation
     * \param align     desired alignment of allocation
     * \param heapMask  mask of heaps considered
     * \param flags     ion allocation flags
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * allocation failed.
     */
    static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);

    c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
        (void)fence; // TODO: wait for fence
        *addr = nullptr;
        if (!mMappings.empty()) {
            ALOGV("multiple map");
            // TODO: technically we should return DUPLICATE here, but our block views don't
            // actually unmap, so we end up remapping an ion buffer multiple times.
            //
            // return C2_DUPLICATE;
        }
        if (size == 0) {
            return C2_BAD_VALUE;
        }

        int prot = PROT_NONE;
        int flags = MAP_SHARED;
        if (usage.expected & C2MemoryUsage::CPU_READ) {
            prot |= PROT_READ;
        }
        if (usage.expected & C2MemoryUsage::CPU_WRITE) {
            prot |= PROT_WRITE;
        }

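        // Worked example (illustrative, assuming 4096-byte pages): a request at
        // offset = 5000 is page-aligned to mapOffset = 4096 with alignmentBytes = 904,
        // so mapSize = size + 904 and the address returned to the caller is base + 904.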
        size_t alignmentBytes = offset % PAGE_SIZE;
        size_t mapOffset = offset - alignmentBytes;
        size_t mapSize = size + alignmentBytes;
        Mapping map = { nullptr, alignmentBytes, mapSize };

        c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
        if (map.addr) {
            std::lock_guard<std::mutex> guard(mMutexMappings);
            mMappings.push_back(map);
        }
        return err;
    }

    c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
        if (mMappings.empty()) {
            ALOGD("tried to unmap unmapped buffer");
            return C2_NOT_FOUND;
        }
        { // Scope for the lock_guard of mMutexMappings.
            std::lock_guard<std::mutex> guard(mMutexMappings);
            for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
                if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
                        size + it->alignmentBytes != it->size) {
                    continue;
                }
                int err = munmap(it->addr, it->size);
                if (err != 0) {
                    ALOGD("munmap failed");
                    return c2_map_errno<EINVAL>(errno);
                }
                if (fence) {
                    *fence = C2Fence(); // not using fences
                }
                (void)mMappings.erase(it);
                ALOGV("successfully unmapped: addr=%p size=%zu fd=%d", addr, size,
                      mHandle.bufferFd());
                return C2_OK;
            }
        }
        ALOGD("unmap failed to find specified map");
        return C2_BAD_VALUE;
    }

    virtual ~Impl() {
        if (!mMappings.empty()) {
            ALOGD("Dangling mappings!");
            std::lock_guard<std::mutex> guard(mMutexMappings);
            for (const Mapping &map : mMappings) {
                (void)munmap(map.addr, map.size);
            }
        }
        if (mMapFd >= 0) {
            close(mMapFd);
            mMapFd = -1;
        }
        if (mInit == C2_OK) {
            if (mBuffer >= 0) {
                (void)ion_free(mIonFd, mBuffer);
            }
            native_handle_close(&mHandle);
        }
        if (mIonFd >= 0) {
            close(mIonFd);
        }
    }

    c2_status_t status() const {
        return mInit;
    }

    const C2Handle *handle() const {
        return &mHandle;
    }

    C2Allocator::id_t getAllocatorId() const {
        return mId;
    }

    virtual ion_user_handle_t ionHandle() const {
        return mBuffer;
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        if (mMapFd == -1) {
            int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
                              flags, mapOffset, (unsigned char**)base, &mMapFd);
            ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
                  "offset = %zu) returned (%d)",
                  mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
            if (ret) {
                mMapFd = -1;
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(-ret);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        } else {
            *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
304 ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
305 "returned (%d)",
306 mapSize, prot, flags, mMapFd, mapOffset, errno);
            if (*base == MAP_FAILED) {
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(errno);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        }
        return err;
    }

    int mIonFd;
    C2HandleIon mHandle;
    ion_user_handle_t mBuffer;
    C2Allocator::id_t mId;
    c2_status_t mInit;
    int mMapFd; // only one for now
    struct Mapping {
        void *addr;
        size_t alignmentBytes;
        size_t size;
    };
    std::list<Mapping> mMappings;
    std::mutex mMutexMappings;
};

class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
public:
    /**
     * Constructs an ion allocation for platforms with new (ion_4.12.h) api
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
        : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
    }

    virtual ~ImplV2() = default;

    virtual ion_user_handle_t ionHandle() const {
        return mHandle.bufferFd();
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
361 ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
362 "returned (%d)",
363 mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t *)*base + alignmentBytes;
        }
        return err;
    }

};

C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
        C2Allocator::id_t id) {
    int ret = 0;
    if (ion_is_legacy(ionFd)) {
        ion_user_handle_t buffer = -1;
        ret = ion_import(ionFd, bufferFd, &buffer);
        return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
    } else {
        return new ImplV2(ionFd, capacity, bufferFd, id, ret);
    }
}

C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
    int bufferFd = -1;
    ion_user_handle_t buffer = -1;
    // NOTE: read this property directly via the property API, as this code has to run on
    // Android Q, but the generated sysprop accessor was only introduced in Android S.
    static size_t sPadding =
            base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
    if (sPadding > SIZE_MAX - size) {
        ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx", size, sPadding);
        // use ImplV2 as there is no allocation anyway
        return new ImplV2(ionFd, size, -1, id, -ENOMEM);
    }

    size_t allocSize = size + sPadding;
    if (align) {
        if (align - 1 > SIZE_MAX - allocSize) {
            ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx and alignment %#zx",
                  size, sPadding, align);
            // use ImplV2 as there is no allocation anyway
            return new ImplV2(ionFd, size, -1, id, -ENOMEM);
        }
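        // Round allocSize up to the next multiple of align. Editorial note: this bit
        // trick assumes align is a power of two (e.g. align = 0x1000 rounds 0x1234
        // up to 0x2000), which ion heap alignments are expected to be.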
        allocSize += align - 1;
        allocSize &= ~(align - 1);
    }
    int ret;

    if (ion_is_legacy(ionFd)) {
        ret = ion_alloc(ionFd, allocSize, align, heapMask, flags, &buffer);
416 ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
417 "returned (%d) ; buffer = %d",
418 ionFd, allocSize, align, heapMask, flags, ret, buffer);
        if (ret == 0) {
            // get buffer fd for native handle constructor
            ret = ion_share(ionFd, buffer, &bufferFd);
            if (ret != 0) {
                ion_free(ionFd, buffer);
                buffer = -1;
            }
        }
        // the padding is not usable so deduct it from the advertised capacity
        return new Impl(ionFd, allocSize - sPadding, bufferFd, buffer, id, ret);
    } else {
        ret = ion_alloc_fd(ionFd, allocSize, align, heapMask, flags, &bufferFd);
431 ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
432 "returned (%d) ; bufferFd = %d",
433 ionFd, allocSize, align, heapMask, flags, ret, bufferFd);

        // the padding is not usable so deduct it from the advertised capacity
        return new ImplV2(ionFd, allocSize - sPadding, bufferFd, id, ret);
    }
}

c2_status_t C2AllocationIon::map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
    return mImpl->map(offset, size, usage, fence, addr);
}

c2_status_t C2AllocationIon::unmap(void *addr, size_t size, C2Fence *fence) {
    return mImpl->unmap(addr, size, fence);
}

c2_status_t C2AllocationIon::status() const {
    return mImpl->status();
}

C2Allocator::id_t C2AllocationIon::getAllocatorId() const {
    return mImpl->getAllocatorId();
}

bool C2AllocationIon::equals(const std::shared_ptr<C2LinearAllocation> &other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2AllocationIon> otherAsIon = std::static_pointer_cast<C2AllocationIon>(other);
    return mImpl->ionHandle() == otherAsIon->mImpl->ionHandle();
}

const C2Handle *C2AllocationIon::handle() const {
    return mImpl->handle();
}

C2AllocationIon::~C2AllocationIon() {
    delete mImpl;
}

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags, id)) { }

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Import(ionFd, size, shareFd, id)) { }

/* ======================================= ION ALLOCATOR ====================================== */
C2AllocatorIon::C2AllocatorIon(id_t id)
    : mInit(C2_OK),
      mIonFd(ion_open()) {
    if (mIonFd < 0) {
        switch (errno) {
        case ENOENT: mInit = C2_OMITTED; break;
        default: mInit = c2_map_errno<EACCES>(errno); break;
        }
    } else {
        C2MemoryUsage minUsage = { 0, 0 };
        C2MemoryUsage maxUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
        Traits traits = { "android.allocator.ion", id, LINEAR, minUsage, maxUsage };
        mTraits = std::make_shared<Traits>(traits);
        mBlockSize = ::getpagesize();
    }
}

C2AllocatorIon::~C2AllocatorIon() {
    if (mInit == C2_OK) {
        ion_close(mIonFd);
    }
}

C2Allocator::id_t C2AllocatorIon::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2AllocatorIon::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2AllocatorIon::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

void C2AllocatorIon::setUsageMapper(
        const UsageMapperFn &mapper, uint64_t minUsage, uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    Traits traits = {
        mTraits->name, mTraits->id, LINEAR,
        C2MemoryUsage(minUsage), C2MemoryUsage(maxUsage)
    };
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}
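
// Illustrative sketch (not part of the original source): a vendor could install a
// custom mapper that, say, always requests cached CPU buffers on every heap. The
// lambda below is hypothetical; only its signature must match UsageMapperFn.
//
//   allocator->setUsageMapper(
//       [](C2MemoryUsage usage, size_t capacity,
//               size_t *align, unsigned *heapMask, unsigned *flags) -> c2_status_t {
//           (void)usage; (void)capacity;
//           *align = 0;
//           *heapMask = ~0u;           // consider all heaps
//           *flags = ION_FLAG_CACHED;  // always request cached buffers
//           return C2_OK;
//       },
//       minUsage, maxUsage, ::getpagesize());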

std::size_t C2AllocatorIon::MapperKeyHash::operator()(const MapperKey &k) const {
    return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
}

c2_status_t C2AllocatorIon::mapUsage(
        C2MemoryUsage usage, size_t capacity, size_t *align, unsigned *heapMask, unsigned *flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // round capacity up to a multiple of mBlockSize (assumed to be a power of two;
    // it defaults to the page size)
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, align, heapMask, flags);
        } else {
            *align = 0; // TODO make this 1
            *heapMask = ~0; // default mask
            if (usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)) {
                *flags = ION_FLAG_CACHED; // cache CPU-accessed buffers
            } else {
                *flags = 0; // default flags
            }
            res = C2_NO_INIT;
        }
        // add usage to cache
        MapperValue value = std::make_tuple(*align, *heapMask, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // remove the LRU entry
            // new entries are emplaced at the front, so the least-recently-used
            // entry is at the back of the list
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue &value = entry->second->second;
        std::tie(*align, *heapMask, *flags, res) = value;
    }
    return res;
}

c2_status_t C2AllocatorIon::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation> *allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    size_t align = 0;
    unsigned heapMask = ~0;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &align, &heapMask, &flags);
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}
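
// Illustrative usage sketch (not part of the original source): a client would
// typically allocate a block, map it for CPU access, and unmap it when done.
// The allocator pointer and sizes below are hypothetical.
//
//   std::shared_ptr<C2LinearAllocation> alloc;
//   C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
//   if (allocator->newLinearAllocation(4096, usage, &alloc) == C2_OK) {
//       void *addr = nullptr;
//       if (alloc->map(0 /* offset */, 4096 /* size */, usage, nullptr /* fence */,
//                      &addr) == C2_OK) {
//           // ... read/write through addr ...
//           alloc->unmap(addr, 4096, nullptr /* fence */);
//       }
//   }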

c2_status_t C2AllocatorIon::priorLinearAllocation(
        const C2Handle *handle, std::shared_ptr<C2LinearAllocation> *allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleIon::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(const_cast<native_handle_t*>(
                reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

bool C2AllocatorIon::CheckHandle(const C2Handle* const o) {
    return C2HandleIon::IsValid(o);
}

} // namespace android