/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/unique_fd.h>
#include <asm-generic/mman.h>
#include <cutils/ashmem.h>
#include <cutils/native_handle.h>
#include <fmq/AidlMessageQueue.h>
#include <fmq/ConvertMQDescriptors.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>
#include <gtest/gtest-death-test.h>
#include <gtest/gtest.h>
#include <sys/resource.h>
#include <atomic>
#include <cstdlib>
#include <sstream>
#include <thread>

using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
using android::hardware::kSynchronizedReadWrite;
using android::hardware::kUnsynchronizedWrite;

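/*
 * EventFlag bits used by the blocking read/write tests below: readers wait on
 * kFmqNotEmpty and, once data has been consumed, wake writers with kFmqNotFull.
 */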
enum EventFlagBits : uint32_t {
    kFmqNotEmpty = 1 << 0,
    kFmqNotFull = 1 << 1,
};

typedef android::AidlMessageQueue<uint8_t, SynchronizedReadWrite> AidlMessageQueueSync;
typedef android::AidlMessageQueue<uint8_t, UnsynchronizedWrite> AidlMessageQueueUnsync;
typedef android::hardware::MessageQueue<uint8_t, kSynchronizedReadWrite> MessageQueueSync;
typedef android::hardware::MessageQueue<uint8_t, kUnsynchronizedWrite> MessageQueueUnsync;
typedef android::AidlMessageQueue<uint16_t, SynchronizedReadWrite> AidlMessageQueueSync16;
typedef android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> MessageQueueSync16;

typedef android::hardware::MessageQueue<uint8_t, kSynchronizedReadWrite> MessageQueueSync8;
typedef android::hardware::MQDescriptor<uint8_t, kSynchronizedReadWrite> HidlMQDescSync8;
typedef android::AidlMessageQueue<int8_t, SynchronizedReadWrite> AidlMessageQueueSync8;
typedef aidl::android::hardware::common::fmq::MQDescriptor<int8_t, SynchronizedReadWrite>
        AidlMQDescSync8;
typedef android::hardware::MessageQueue<uint8_t, kUnsynchronizedWrite> MessageQueueUnsync8;
typedef android::hardware::MQDescriptor<uint8_t, kUnsynchronizedWrite> HidlMQDescUnsync8;
typedef android::AidlMessageQueue<int8_t, UnsynchronizedWrite> AidlMessageQueueUnsync8;
typedef aidl::android::hardware::common::fmq::MQDescriptor<int8_t, UnsynchronizedWrite>
        AidlMQDescUnsync8;

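/*
 * SINGLE_FD: the queue allocates its own shared memory region. DOUBLE_FD: the
 * test supplies a separate ashmem fd that backs only the ring buffer, so the
 * descriptor carries a second fd alongside the queue's bookkeeping region.
 */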
enum class SetupType {
    SINGLE_FD,
    DOUBLE_FD,
};

template <typename T, SetupType setupType>
class TestParamTypes {
  public:
    typedef T MQType;
    static constexpr SetupType Setup = setupType;
};

// Run everything on both the AIDL and HIDL versions with one and two FDs
typedef ::testing::Types<TestParamTypes<AidlMessageQueueSync, SetupType::SINGLE_FD>,
                         TestParamTypes<MessageQueueSync, SetupType::SINGLE_FD>,
                         TestParamTypes<AidlMessageQueueSync, SetupType::DOUBLE_FD>,
                         TestParamTypes<MessageQueueSync, SetupType::DOUBLE_FD>>
        SyncTypes;
typedef ::testing::Types<TestParamTypes<AidlMessageQueueUnsync, SetupType::SINGLE_FD>,
                         TestParamTypes<MessageQueueUnsync, SetupType::SINGLE_FD>,
                         TestParamTypes<AidlMessageQueueUnsync, SetupType::DOUBLE_FD>,
                         TestParamTypes<MessageQueueUnsync, SetupType::DOUBLE_FD>>
        UnsyncTypes;
typedef ::testing::Types<TestParamTypes<AidlMessageQueueSync16, SetupType::SINGLE_FD>,
                         TestParamTypes<MessageQueueSync16, SetupType::SINGLE_FD>,
                         TestParamTypes<AidlMessageQueueSync16, SetupType::DOUBLE_FD>,
                         TestParamTypes<MessageQueueSync16, SetupType::DOUBLE_FD>>
        BadConfigTypes;

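/*
 * Common base for the typed fixtures. The static reader-thread helpers are
 * shared by the blocking-read tests further down in this file.
 */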
template <typename T>
class TestBase : public ::testing::Test {
  public:
    static void ReaderThreadBlocking(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr);
    static void ReaderThreadBlocking2(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr);
};

TYPED_TEST_CASE(SynchronizedReadWrites, SyncTypes);

template <typename T>
class SynchronizedReadWrites : public TestBase<T> {
  protected:
    virtual void TearDown() {
        delete mQueue;
    }

    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        static constexpr size_t kPayloadSizeBytes = 1;
        if (T::Setup == SetupType::SINGLE_FD) {
            mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
        } else {
            android::base::unique_fd ringbufferFd(::ashmem_create_region(
                    "SyncReadWrite", kNumElementsInQueue * kPayloadSizeBytes));
            mQueue = new (std::nothrow)
                    typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
                                       kNumElementsInQueue * kPayloadSizeBytes);
        }
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
    }

    typename T::MQType* mQueue = nullptr;
    size_t mNumMessagesMax = 0;
};

TYPED_TEST_CASE(UnsynchronizedWriteTest, UnsyncTypes);

template <typename T>
class UnsynchronizedWriteTest : public TestBase<T> {
  protected:
    virtual void TearDown() {
        delete mQueue;
    }

    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        static constexpr size_t kPayloadSizeBytes = 1;
        if (T::Setup == SetupType::SINGLE_FD) {
            mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
        } else {
            android::base::unique_fd ringbufferFd(
                    ::ashmem_create_region("UnsyncWrite", kNumElementsInQueue * kPayloadSizeBytes));
            mQueue = new (std::nothrow)
                    typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
                                       kNumElementsInQueue * kPayloadSizeBytes);
        }
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
    }

    typename T::MQType* mQueue = nullptr;
    size_t mNumMessagesMax = 0;
};

TYPED_TEST_CASE(BlockingReadWrites, SyncTypes);

template <typename T>
class BlockingReadWrites : public TestBase<T> {
  protected:
    virtual void TearDown() {
        delete mQueue;
    }
    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        static constexpr size_t kPayloadSizeBytes = 1;
        if (T::Setup == SetupType::SINGLE_FD) {
            mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
        } else {
            android::base::unique_fd ringbufferFd(::ashmem_create_region(
                    "SyncBlockingReadWrite", kNumElementsInQueue * kPayloadSizeBytes));
            mQueue = new (std::nothrow)
                    typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
                                       kNumElementsInQueue * kPayloadSizeBytes);
        }
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
        /*
         * Initialize the EventFlag word to indicate Queue is not full.
         */
        std::atomic_init(&mFw, static_cast<uint32_t>(kFmqNotFull));
    }

    typename T::MQType* mQueue;
    std::atomic<uint32_t> mFw;
    size_t mNumMessagesMax = 0;
};

TYPED_TEST_CASE(QueueSizeOdd, SyncTypes);

template <typename T>
class QueueSizeOdd : public TestBase<T> {
  protected:
    virtual void TearDown() { delete mQueue; }
    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2049;
        static constexpr size_t kPayloadSizeBytes = 1;
        if (T::Setup == SetupType::SINGLE_FD) {
            mQueue = new (std::nothrow)
                    typename T::MQType(kNumElementsInQueue, true /* configureEventFlagWord */);
        } else {
            android::base::unique_fd ringbufferFd(
                    ::ashmem_create_region("SyncSizeOdd", kNumElementsInQueue * kPayloadSizeBytes));
            mQueue = new (std::nothrow) typename T::MQType(
                    kNumElementsInQueue, true /* configureEventFlagWord */, std::move(ringbufferFd),
                    kNumElementsInQueue * kPayloadSizeBytes);
        }
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
        auto evFlagWordPtr = mQueue->getEventFlagWord();
        ASSERT_NE(nullptr, evFlagWordPtr);
        /*
         * Initialize the EventFlag word to indicate Queue is not full.
         */
        std::atomic_init(evFlagWordPtr, static_cast<uint32_t>(kFmqNotFull));
    }

    typename T::MQType* mQueue;
    size_t mNumMessagesMax = 0;
};

TYPED_TEST_CASE(BadQueueConfig, BadConfigTypes);

template <typename T>
class BadQueueConfig : public TestBase<T> {};

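/*
 * Plain (non-typed) fixtures for cases that only apply to one binding:
 * AIDL-specific bad configurations, HIDL-to-AIDL descriptor conversion, and
 * failures specific to the user-supplied ring buffer (double fd) constructor.
 */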
class AidlOnlyBadQueueConfig : public ::testing::Test {};
class Hidl2AidlOperation : public ::testing::Test {};
class DoubleFdFailures : public ::testing::Test {};

/*
 * Utility function to initialize data to be written to the FMQ
 */
inline void initData(uint8_t* data, size_t count) {
    for (size_t i = 0; i < count; i++) {
        data[i] = i & 0xFF;
    }
}

/*
 * This thread will attempt to read and block. When wait returns
 * it checks if the kFmqNotEmpty bit is actually set.
 * If the read is successful, it wakes the waiters on kFmqNotFull.
 */
template <typename T>
void TestBase<T>::ReaderThreadBlocking(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    while (true) {
        uint32_t efState = 0;
        android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                              &efState,
                                              5000000000 /* timeoutNanoSeconds */);
        /*
         * Wait should not time out here after 5s
         */
        ASSERT_NE(android::TIMED_OUT, ret);

        if ((efState & kFmqNotEmpty) && fmq->read(data, dataLen)) {
            efGroup->wake(kFmqNotFull);
            break;
        }
    }

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * This thread will attempt to read and block using the readBlocking() API and
 * passes in a pointer to an EventFlag object.
 */
template <typename T>
void TestBase<T>::ReaderThreadBlocking2(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);
    bool ret = fmq->readBlocking(data,
                                 dataLen,
                                 static_cast<uint32_t>(kFmqNotFull),
                                 static_cast<uint32_t>(kFmqNotEmpty),
                                 5000000000 /* timeOutNanos */,
                                 efGroup);
    ASSERT_TRUE(ret);
    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

TYPED_TEST(BadQueueConfig, QueueSizeTooLarge) {
    size_t numElementsInQueue = SIZE_MAX / sizeof(uint16_t) + 1;
    typename TypeParam::MQType* fmq =
            new (std::nothrow) typename TypeParam::MQType(numElementsInQueue);
    ASSERT_NE(nullptr, fmq);
    /*
     * Should fail due to size being too large to fit into size_t.
     */
    ASSERT_FALSE(fmq->isValid());
}

// If this test fails and we do leak FDs, the next test will cause a crash
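// The loop below runs past RLIMIT_NOFILE; if each failed construction leaked a
// file descriptor, the process would run out of descriptors and the final
// ashmem_create_region() call would fail.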
TEST_F(AidlOnlyBadQueueConfig, LookForLeakedFds) {
    size_t numElementsInQueue = SIZE_MAX / sizeof(uint32_t) - PAGE_SIZE - 1;
    struct rlimit rlim;
    ASSERT_EQ(getrlimit(RLIMIT_NOFILE, &rlim), 0);
    for (int i = 0; i <= rlim.rlim_cur + 1; i++) {
        android::AidlMessageQueue<uint32_t, SynchronizedReadWrite> fmq(numElementsInQueue);
        ASSERT_FALSE(fmq.isValid());
    }
    // try to get another FD
    int fd = ashmem_create_region("test", 100);
    ASSERT_NE(fd, -1);
    close(fd);
}

TEST_F(Hidl2AidlOperation, ConvertDescriptorsSync) {
    size_t numElementsInQueue = 64;

    // Create HIDL side and get MQDescriptor
    MessageQueueSync8* fmq = new (std::nothrow) MessageQueueSync8(numElementsInQueue);
    ASSERT_NE(nullptr, fmq);
    ASSERT_TRUE(fmq->isValid());
    const HidlMQDescSync8* hidlDesc = fmq->getDesc();
    ASSERT_NE(nullptr, hidlDesc);

    // Create AIDL MQDescriptor to send to another process based off the HIDL MQDescriptor
    AidlMQDescSync8 aidlDesc;
    android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(*hidlDesc,
                                                                                  &aidlDesc);

    // Other process will create the other side of the queue using the AIDL MQDescriptor
    AidlMessageQueueSync8* aidlFmq = new (std::nothrow) AidlMessageQueueSync8(aidlDesc);
    ASSERT_NE(nullptr, aidlFmq);
    ASSERT_TRUE(aidlFmq->isValid());

    // Make sure a write to the HIDL side will show up for the AIDL side
    constexpr size_t dataLen = 4;
    uint8_t data[dataLen] = {12, 11, 10, 9};
    fmq->write(data, dataLen);

    int8_t readData[dataLen];
    ASSERT_TRUE(aidlFmq->read(readData, dataLen));

    ASSERT_EQ(data[0], readData[0]);
    ASSERT_EQ(data[1], readData[1]);
    ASSERT_EQ(data[2], readData[2]);
    ASSERT_EQ(data[3], readData[3]);
}

TEST_F(Hidl2AidlOperation, ConvertDescriptorsUnsync) {
    size_t numElementsInQueue = 64;

    // Create HIDL side and get MQDescriptor
    MessageQueueUnsync8* fmq = new (std::nothrow) MessageQueueUnsync8(numElementsInQueue);
    ASSERT_NE(nullptr, fmq);
    ASSERT_TRUE(fmq->isValid());
    const HidlMQDescUnsync8* hidlDesc = fmq->getDesc();
    ASSERT_NE(nullptr, hidlDesc);

    // Create AIDL MQDescriptor to send to another process based off the HIDL MQDescriptor
    AidlMQDescUnsync8 aidlDesc;
    android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(*hidlDesc,
                                                                                &aidlDesc);

    // Other process will create the other side of the queue using the AIDL MQDescriptor
    AidlMessageQueueUnsync8* aidlFmq = new (std::nothrow) AidlMessageQueueUnsync8(aidlDesc);
    ASSERT_NE(nullptr, aidlFmq);
    ASSERT_TRUE(aidlFmq->isValid());

    // Can have multiple readers with unsync flavor
    AidlMessageQueueUnsync8* aidlFmq2 = new (std::nothrow) AidlMessageQueueUnsync8(aidlDesc);
    ASSERT_NE(nullptr, aidlFmq2);
    ASSERT_TRUE(aidlFmq2->isValid());

    // Make sure a write to the HIDL side will show up for the AIDL side
    constexpr size_t dataLen = 4;
    uint8_t data[dataLen] = {12, 11, 10, 9};
    fmq->write(data, dataLen);

    int8_t readData[dataLen];
    ASSERT_TRUE(aidlFmq->read(readData, dataLen));
    int8_t readData2[dataLen];
    ASSERT_TRUE(aidlFmq2->read(readData2, dataLen));

    ASSERT_EQ(data[0], readData[0]);
    ASSERT_EQ(data[1], readData[1]);
    ASSERT_EQ(data[2], readData[2]);
    ASSERT_EQ(data[3], readData[3]);
    ASSERT_EQ(data[0], readData2[0]);
    ASSERT_EQ(data[1], readData2[1]);
    ASSERT_EQ(data[2], readData2[2]);
    ASSERT_EQ(data[3], readData2[3]);
}

TEST_F(Hidl2AidlOperation, ConvertFdIndex1) {
    native_handle_t* mqHandle = native_handle_create(2 /* numFds */, 0 /* numInts */);
    if (mqHandle == nullptr) {
        return;
    }
    mqHandle->data[0] = 12;
    mqHandle->data[1] = 5;
    ::android::hardware::hidl_vec<android::hardware::GrantorDescriptor> grantors;
    grantors.resize(3);
    grantors[0] = {0, 1 /* fdIndex */, 16, 16};
    grantors[1] = {0, 1 /* fdIndex */, 16, 16};
    grantors[2] = {0, 1 /* fdIndex */, 16, 16};

    HidlMQDescUnsync8* hidlDesc = new (std::nothrow) HidlMQDescUnsync8(grantors, mqHandle, 10);
    ASSERT_TRUE(hidlDesc->isHandleValid());

    AidlMQDescUnsync8 aidlDesc;
    bool ret = android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(
            *hidlDesc, &aidlDesc);
    ASSERT_TRUE(ret);
}

TEST_F(Hidl2AidlOperation, ConvertMultipleFds) {
    native_handle_t* mqHandle = native_handle_create(2 /* numFds */, 0 /* numInts */);
    if (mqHandle == nullptr) {
        return;
    }
    mqHandle->data[0] = ::ashmem_create_region("ConvertMultipleFds", 8);
    mqHandle->data[1] = ::ashmem_create_region("ConvertMultipleFds2", 8);
    ::android::hardware::hidl_vec<android::hardware::GrantorDescriptor> grantors;
    grantors.resize(3);
    grantors[0] = {0, 1 /* fdIndex */, 16, 16};
    grantors[1] = {0, 1 /* fdIndex */, 16, 16};
    grantors[2] = {0, 0 /* fdIndex */, 16, 16};

    HidlMQDescUnsync8* hidlDesc = new (std::nothrow) HidlMQDescUnsync8(grantors, mqHandle, 10);
    ASSERT_TRUE(hidlDesc->isHandleValid());

    AidlMQDescUnsync8 aidlDesc;
    bool ret = android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(
            *hidlDesc, &aidlDesc);
    ASSERT_TRUE(ret);
    EXPECT_EQ(aidlDesc.handle.fds.size(), 2);
    native_handle_close(mqHandle);
    native_handle_delete(mqHandle);
}

// TODO(b/165674950) Since AIDL does not support unsigned integers, it can only support
// 1/2 the queue size of HIDL. Once support is added to AIDL, this restriction can be
// lifted. Until then, check against SSIZE_MAX instead of SIZE_MAX.
TEST_F(AidlOnlyBadQueueConfig, QueueSizeTooLargeForAidl) {
    size_t numElementsInQueue = SSIZE_MAX / sizeof(uint16_t) + 1;
    AidlMessageQueueSync16* fmq = new (std::nothrow) AidlMessageQueueSync16(numElementsInQueue);
    ASSERT_NE(nullptr, fmq);
    /*
     * Should fail due to size being too large to fit into size_t.
     */
    ASSERT_FALSE(fmq->isValid());
}

TEST_F(AidlOnlyBadQueueConfig, NegativeAidlDescriptor) {
    aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc;
    desc.quantum = -10;
    AidlMessageQueueSync16* fmq = new (std::nothrow) AidlMessageQueueSync16(desc);
    ASSERT_NE(nullptr, fmq);
    /*
     * Should fail due to quantum being negative.
     */
    ASSERT_FALSE(fmq->isValid());
}

TEST_F(AidlOnlyBadQueueConfig, NegativeAidlDescriptorGrantor) {
    aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc;
    desc.quantum = 2;
    desc.flags = 0;
    desc.grantors.push_back(
            aidl::android::hardware::common::fmq::GrantorDescriptor{.offset = 12, .extent = -10});
    AidlMessageQueueSync16* fmq = new (std::nothrow) AidlMessageQueueSync16(desc);
    ASSERT_NE(nullptr, fmq);
    /*
     * Should fail due to grantor having negative extent.
     */
    ASSERT_FALSE(fmq->isValid());
}

490
491 /*
492 * Test creating a new queue from a modified MQDescriptor of another queue.
493 * If MQDescriptor.quantum doesn't match the size of the payload(T), the queue
494 * should be invalid.
495 */
TEST_F(AidlOnlyBadQueueConfig,MismatchedPayloadSize)496 TEST_F(AidlOnlyBadQueueConfig, MismatchedPayloadSize) {
497 AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(64);
498 aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc =
499 fmq.dupeDesc();
500 // This should work fine with the unmodified MQDescriptor
501 AidlMessageQueueSync16 fmq2 = AidlMessageQueueSync16(desc);
502 ASSERT_TRUE(fmq2.isValid());
503
504 // Simulate a difference in payload size between processes handling the queue
505 desc.quantum = 8;
506 AidlMessageQueueSync16 fmq3 = AidlMessageQueueSync16(desc);
507
508 // Should fail due to the quantum not matching the sizeof(uint16_t)
509 ASSERT_FALSE(fmq3.isValid());
510 }
511
/*
 * Test creating a new queue with an invalid fd. This should assert with message
 * "mRing is null".
 */
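// Note: fd 3000 is assumed not to be an open file descriptor in the test
// process, so mapping the ring buffer is expected to fail.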
TEST_F(DoubleFdFailures, InvalidFd) {
    EXPECT_DEATH_IF_SUPPORTED(AidlMessageQueueSync(64, false, android::base::unique_fd(3000), 64),
                              "mRing is null");
}

/*
 * Test creating a new queue with a buffer fd and bufferSize smaller than the
 * requested queue. This should fail to create a valid message queue.
 */
TEST_F(DoubleFdFailures, InvalidFdSize) {
    constexpr size_t kNumElementsInQueue = 1024;
    constexpr size_t kRequiredDataBufferSize = kNumElementsInQueue * sizeof(uint16_t);
    android::base::unique_fd ringbufferFd(
            ::ashmem_create_region("SyncReadWrite", kRequiredDataBufferSize - 8));
    AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(
            kNumElementsInQueue, false, std::move(ringbufferFd), kRequiredDataBufferSize - 8);
    EXPECT_FALSE(fmq.isValid());
}

/*
 * Test creating a new queue with a buffer fd and bufferSize larger than the
 * requested queue. The message queue should be valid.
 */
TEST_F(DoubleFdFailures, LargerFdSize) {
    constexpr size_t kNumElementsInQueue = 1024;
    constexpr size_t kRequiredDataBufferSize = kNumElementsInQueue * sizeof(uint16_t);
    android::base::unique_fd ringbufferFd(
            ::ashmem_create_region("SyncReadWrite", kRequiredDataBufferSize + 8));
    AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(
            kNumElementsInQueue, false, std::move(ringbufferFd), kRequiredDataBufferSize + 8);
    EXPECT_TRUE(fmq.isValid());
}

/*
 * Test that basic blocking works. This test uses the non-blocking read()/write()
 * APIs.
 */
TYPED_TEST(BlockingReadWrites, SmallInputTest1) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty.
     */
    std::thread Reader(BlockingReadWrites<TypeParam>::ReaderThreadBlocking, this->mQueue,
                       &this->mFw);
    struct timespec waitTime = {0, 100 * 1000000};
    ASSERT_EQ(0, nanosleep(&waitTime, NULL));

    /*
     * After waiting for some time, write into the FMQ
     * and call wake() on kFmqNotEmpty.
     */
    ASSERT_TRUE(this->mQueue->write(data, dataLen));
    status = efGroup->wake(kFmqNotEmpty);
    ASSERT_EQ(android::NO_ERROR, status);

    ASSERT_EQ(0, nanosleep(&waitTime, NULL));
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that basic blocking works. This test uses the
 * writeBlocking()/readBlocking() APIs.
 */
TYPED_TEST(BlockingReadWrites, SmallInputTest2) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty. It will
     * call wake() on kFmqNotFull when the read is successful.
     */
    std::thread Reader(BlockingReadWrites<TypeParam>::ReaderThreadBlocking2, this->mQueue,
                       &this->mFw);
    bool ret = this->mQueue->writeBlocking(data, dataLen, static_cast<uint32_t>(kFmqNotFull),
                                           static_cast<uint32_t>(kFmqNotEmpty),
                                           5000000000 /* timeOutNanos */, efGroup);
    ASSERT_TRUE(ret);
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that basic blocking times out as intended.
 */
TYPED_TEST(BlockingReadWrites, BlockingTimeOutTest) {
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /* Block on an EventFlag bit that no one will wake and time out in 1s */
    uint32_t efState = 0;
    android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                          &efState,
                                          1000000000 /* timeoutNanoSeconds */);
    /*
     * Wait should time out in a second.
     */
    EXPECT_EQ(android::TIMED_OUT, ret);

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that odd queue sizes do not cause an unaligned access error
 * on the EventFlag object.
 */
TYPED_TEST(QueueSizeOdd, EventFlagTest) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    bool ret = this->mQueue->writeBlocking(data, dataLen, static_cast<uint32_t>(kFmqNotFull),
                                           static_cast<uint32_t>(kFmqNotEmpty),
                                           5000000000 /* timeOutNanos */);
    ASSERT_TRUE(ret);
}

/*
 * Verify that a few bytes of data can be successfully written and read.
 */
TYPED_TEST(SynchronizedReadWrites, SmallInputTest1) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);

    ASSERT_TRUE(this->mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(this->mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that a few bytes of data can be successfully written and read using
 * beginWrite()/commitWrite() and beginRead()/commitRead().
 */
TYPED_TEST(SynchronizedReadWrites, SmallInputTest2) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);

    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_TRUE(this->mQueue->beginWrite(dataLen, &tx));

    ASSERT_TRUE(tx.copyTo(data, 0 /* startIdx */, dataLen));

    ASSERT_TRUE(this->mQueue->commitWrite(dataLen));

    uint8_t readData[dataLen] = {};

    ASSERT_TRUE(this->mQueue->beginRead(dataLen, &tx));

    ASSERT_TRUE(tx.copyFrom(readData, 0 /* startIdx */, dataLen));

    ASSERT_TRUE(this->mQueue->commitRead(dataLen));

    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that a few bytes of data can be successfully written and read using
 * beginWrite()/commitWrite() and beginRead()/commitRead() as well as getSlot().
 */
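// The MemTransaction may straddle the end of the ring buffer, in which case it
// is described by two contiguous regions; getSlot(i) hides that split from the
// caller.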
TYPED_TEST(SynchronizedReadWrites, SmallInputTest3) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_TRUE(this->mQueue->beginWrite(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(first.getLength() + second.getLength(), dataLen);
    for (size_t i = 0; i < dataLen; i++) {
        uint8_t* ptr = tx.getSlot(i);
        *ptr = data[i];
    }

    ASSERT_TRUE(this->mQueue->commitWrite(dataLen));

    uint8_t readData[dataLen] = {};

    ASSERT_TRUE(this->mQueue->beginRead(dataLen, &tx));

    first = tx.getFirstRegion();
    second = tx.getSecondRegion();

    ASSERT_EQ(first.getLength() + second.getLength(), dataLen);

    for (size_t i = 0; i < dataLen; i++) {
        uint8_t* ptr = tx.getSlot(i);
        readData[i] = *ptr;
    }

    ASSERT_TRUE(this->mQueue->commitRead(dataLen));

    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that read() returns false when trying to read from an empty queue.
 */
TYPED_TEST(SynchronizedReadWrites, ReadWhenEmpty1) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t readData[dataLen];
    ASSERT_FALSE(this->mQueue->read(readData, dataLen));
}

/*
 * Verify that beginRead() returns a MemTransaction object with null pointers when trying
 * to read from an empty queue.
 */
TYPED_TEST(SynchronizedReadWrites, ReadWhenEmpty2) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_LE(dataLen, this->mNumMessagesMax);

    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_FALSE(this->mQueue->beginRead(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Write the queue until full. Verify that another write is unsuccessful.
 * Verify that availableToWrite() returns 0 as expected.
 */
TYPED_TEST(SynchronizedReadWrites, WriteWhenFull1) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    std::vector<uint8_t> data(this->mNumMessagesMax);

    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    ASSERT_EQ(0UL, this->mQueue->availableToWrite());
    ASSERT_FALSE(this->mQueue->write(&data[0], 1));

    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Write the queue until full. Verify that beginWrite() returns
 * a MemTransaction object with null base pointers.
 */
TYPED_TEST(SynchronizedReadWrites, WriteWhenFull2) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    std::vector<uint8_t> data(this->mNumMessagesMax);

    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    ASSERT_EQ(0UL, this->mQueue->availableToWrite());

    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_FALSE(this->mQueue->beginWrite(1, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Write a chunk of data equal to the queue size.
 * Verify that the write is successful and the subsequent read
 * returns the expected data.
 */
TYPED_TEST(SynchronizedReadWrites, LargeInputTest1) {
    std::vector<uint8_t> data(this->mNumMessagesMax);
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Attempt to write a chunk of data larger than the queue size.
 * Verify that it fails. Verify that a subsequent read fails and
 * the queue is still empty.
 */
TYPED_TEST(SynchronizedReadWrites, LargeInputTest2) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, this->mNumMessagesMax);
    std::vector<uint8_t> data(dataLen);

    initData(&data[0], dataLen);
    ASSERT_FALSE(this->mQueue->write(&data[0], dataLen));
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_NE(data, readData);
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
}

/*
 * After the queue is full, try to write more data. Verify that
 * the attempt returns false. Verify that the attempt did not
 * affect the pre-existing data in the queue.
 */
TYPED_TEST(SynchronizedReadWrites, LargeInputTest3) {
    std::vector<uint8_t> data(this->mNumMessagesMax);
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    ASSERT_FALSE(this->mQueue->write(&data[0], 1));
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Verify that beginWrite() returns a MemTransaction with
 * null base pointers when attempting to write data larger
 * than the queue size.
 */
TYPED_TEST(SynchronizedReadWrites, LargeInputTest4) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, this->mNumMessagesMax);

    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_FALSE(this->mQueue->beginWrite(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Verify that multiple reads one after the other return expected data.
 */
TYPED_TEST(SynchronizedReadWrites, MultipleRead) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    ASSERT_TRUE(this->mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(this->mQueue->read(readData + i * chunkSize, chunkSize));
    }
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Verify that multiple writes one after the other happen correctly.
 */
TYPED_TEST(SynchronizedReadWrites, MultipleWrite) {
    const int chunkSize = 100;
    const int chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    for (unsigned int i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(this->mQueue->write(data + i * chunkSize, chunkSize));
    }
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(this->mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Write enough messages into the FMQ to nearly fill it
 * and read back the same.
 * Then write this->mNumMessagesMax messages into the queue. This will cause a
 * wrap around. Read and verify the data.
 */
TYPED_TEST(SynchronizedReadWrites, ReadWriteWrapAround1) {
    size_t numMessages = this->mNumMessagesMax - 1;
    std::vector<uint8_t> data(this->mNumMessagesMax);
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
    ASSERT_TRUE(this->mQueue->read(&readData[0], numMessages));
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Use beginWrite()/commitWrite()/beginRead()/commitRead()
 * to check that wrap arounds are handled correctly.
 * Write enough messages into the FMQ to nearly fill it
 * and read back the same.
 * Then write mNumMessagesMax messages into the queue. This will cause a
 * wrap around. Read and verify the data.
 */
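// After the first write/read of mNumMessagesMax - 1 items, the read/write
// counters sit one slot before the end of the ring, so the full-size
// transaction below straddles the wrap point and is split into two regions.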
TYPED_TEST(SynchronizedReadWrites, ReadWriteWrapAround2) {
    size_t dataLen = this->mNumMessagesMax - 1;
    std::vector<uint8_t> data(this->mNumMessagesMax);
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], dataLen));
    ASSERT_TRUE(this->mQueue->read(&readData[0], dataLen));

    /*
     * The next write and read will have to deal with wrap arounds.
     */
    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_TRUE(this->mQueue->beginWrite(this->mNumMessagesMax, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(first.getLength() + second.getLength(), this->mNumMessagesMax);

    ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */, this->mNumMessagesMax));

    ASSERT_TRUE(this->mQueue->commitWrite(this->mNumMessagesMax));

    ASSERT_TRUE(this->mQueue->beginRead(this->mNumMessagesMax, &tx));

    first = tx.getFirstRegion();
    second = tx.getSecondRegion();

    ASSERT_EQ(first.getLength() + second.getLength(), this->mNumMessagesMax);

    ASSERT_TRUE(tx.copyFrom(&readData[0], 0 /* startIdx */, this->mNumMessagesMax));
    ASSERT_TRUE(this->mQueue->commitRead(this->mNumMessagesMax));

    ASSERT_EQ(data, readData);
}

/*
 * Verify that a few bytes of data can be successfully written and read.
 */
TYPED_TEST(UnsynchronizedWriteTest, SmallInputTest1) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    ASSERT_TRUE(this->mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(this->mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that read() returns false when trying to read from an empty queue.
 */
TYPED_TEST(UnsynchronizedWriteTest, ReadWhenEmpty) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_TRUE(dataLen < this->mNumMessagesMax);
    uint8_t readData[dataLen];
    ASSERT_FALSE(this->mQueue->read(readData, dataLen));
}

/*
 * Fill the queue, then write once more. In the unsynchronized flavor the extra
 * write is successful because the writer may overwrite unread data.
 * Verify that availableToWrite() returns 0 as expected and that the following
 * read of a full queue's worth of data fails.
 */
TYPED_TEST(UnsynchronizedWriteTest, WriteWhenFull1) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    std::vector<uint8_t> data(this->mNumMessagesMax);

    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    ASSERT_EQ(0UL, this->mQueue->availableToWrite());
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));

    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
}

/*
 * Fill the queue, then write once more using beginWrite()/commitWrite().
 * Verify that the write is successful.
 * Verify that the next read fails as expected for the unsynchronized flavor.
 */
TYPED_TEST(UnsynchronizedWriteTest, WriteWhenFull2) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    std::vector<uint8_t> data(this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));

    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_TRUE(this->mQueue->beginWrite(1, &tx));

    ASSERT_EQ(tx.getFirstRegion().getLength(), 1U);

    ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */));

    ASSERT_TRUE(this->mQueue->commitWrite(1));

    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
}

/*
 * Write a chunk of data equal to the queue size.
 * Verify that the write is successful and the subsequent read
 * returns the expected data.
 */
TYPED_TEST(UnsynchronizedWriteTest, LargeInputTest1) {
    std::vector<uint8_t> data(this->mNumMessagesMax);
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Attempt to write a chunk of data larger than the queue size.
 * Verify that it fails. Verify that a subsequent read fails and
 * the queue is still empty.
 */
TYPED_TEST(UnsynchronizedWriteTest, LargeInputTest2) {
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, this->mNumMessagesMax);
    std::vector<uint8_t> data(dataLen);
    initData(&data[0], dataLen);
    ASSERT_FALSE(this->mQueue->write(&data[0], dataLen));
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_NE(data, readData);
    ASSERT_EQ(0UL, this->mQueue->availableToRead());
}

/*
 * After the queue is full, try to write more data. Verify that
 * the attempt is successful. Verify that the read fails
 * as expected.
 */
TYPED_TEST(UnsynchronizedWriteTest, LargeInputTest3) {
    std::vector<uint8_t> data(this->mNumMessagesMax);
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
}

/*
 * Verify that multiple reads one after the other return expected data.
 */
TYPED_TEST(UnsynchronizedWriteTest, MultipleRead) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    ASSERT_TRUE(this->mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(this->mQueue->read(readData + i * chunkSize, chunkSize));
    }
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Verify that multiple writes one after the other happen correctly.
 */
TYPED_TEST(UnsynchronizedWriteTest, MultipleWrite) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(this->mQueue->write(data + i * chunkSize, chunkSize));
    }

    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(this->mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Write enough messages into the FMQ to nearly fill it
 * and read back the same.
 * Then write mNumMessagesMax messages into the queue. This will cause a
 * wrap around. Read and verify the data.
 */
TYPED_TEST(UnsynchronizedWriteTest, ReadWriteWrapAround) {
    size_t numMessages = this->mNumMessagesMax - 1;
    std::vector<uint8_t> data(this->mNumMessagesMax);
    std::vector<uint8_t> readData(this->mNumMessagesMax);

    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
    ASSERT_TRUE(this->mQueue->read(&readData[0], numMessages));
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_EQ(data, readData);
}
