1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <fstream>
#include <thread>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>

#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/prctl.h>

#include "../binder_module.h"
#include "binderAbiHelper.h"
40
// Number of elements in a statically-sized array.
// Fix: parenthesize the macro argument so expressions like
// ARRAY_SIZE(cond ? a : b) expand with the intended precedence.
#define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0]))
42
43 using namespace android;
44 using testing::Not;
45
// gmock matcher comparing a status_t against an expected value; on mismatch
// both sides are printed via statusToString() for a readable failure message.
// e.g. EXPECT_THAT(expr, StatusEq(OK)) << "additional message";
MATCHER_P(StatusEq, expected, (negation ? "not " : "") + statusToString(expected)) {
    *result_listener << statusToString(arg);
    return expected == arg;
}
51
IsPageAligned(void * buf)52 static ::testing::AssertionResult IsPageAligned(void *buf) {
53 if (((unsigned long)buf & ((unsigned long)PAGE_SIZE - 1)) == 0)
54 return ::testing::AssertionSuccess();
55 else
56 return ::testing::AssertionFailure() << buf << " is not page aligned";
57 }
58
// Shared gtest Environment owning the main test server process. Registered
// in main() (not visible in this chunk); tests reach it through binder_env.
static testing::Environment* binder_env;
// Path of this test binary; presumably set from argv in main() (not visible
// here). Used to fork/exec helper server processes.
static char *binderservername;
// Extra argv entry passed through to spawned servers; set outside this view.
static char *binderserversuffix;
// Marker argv entry telling a spawned copy of this binary to run as server.
static char binderserverarg[] = "--binderserver";

// Scheduling parameters referenced by scheduling-policy tests.
static constexpr int kSchedPolicy = SCHED_RR;
static constexpr int kSchedPriority = 7;
static constexpr int kSchedPriorityMore = 8;

// Name under which the main server registers with servicemanager.
static String16 binderLibTestServiceName = String16("test.binderLib");
69
// Transaction codes understood by the test service and its helper servers.
// NOTE(review): "Transcation" is a typo for "Transaction"; renaming the enum
// would touch every user, so it is only flagged here.
enum BinderLibTestTranscationCode {
    BINDER_LIB_TEST_NOP_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
    BINDER_LIB_TEST_REGISTER_SERVER,
    BINDER_LIB_TEST_ADD_SERVER,
    BINDER_LIB_TEST_ADD_POLL_SERVER,
    BINDER_LIB_TEST_CALL_BACK,
    BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF,
    BINDER_LIB_TEST_DELAYED_CALL_BACK,
    BINDER_LIB_TEST_NOP_CALL_BACK,
    BINDER_LIB_TEST_GET_SELF_TRANSACTION,
    BINDER_LIB_TEST_GET_ID_TRANSACTION,
    BINDER_LIB_TEST_INDIRECT_TRANSACTION,
    BINDER_LIB_TEST_SET_ERROR_TRANSACTION,
    BINDER_LIB_TEST_GET_STATUS_TRANSACTION,
    BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION,
    BINDER_LIB_TEST_LINK_DEATH_TRANSACTION,
    BINDER_LIB_TEST_WRITE_FILE_TRANSACTION,
    BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION,
    BINDER_LIB_TEST_EXIT_TRANSACTION,
    BINDER_LIB_TEST_DELAYED_EXIT_TRANSACTION,
    BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION,
    BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION,
    BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION,
    BINDER_LIB_TEST_GET_SCHEDULING_POLICY,
    BINDER_LIB_TEST_NOP_TRANSACTION_WAIT,
    BINDER_LIB_TEST_GETPID,
    BINDER_LIB_TEST_ECHO_VECTOR,
    BINDER_LIB_TEST_REJECT_OBJECTS,
    BINDER_LIB_TEST_CAN_GET_SID,
};
100
start_server_process(int arg2,bool usePoll=false)101 pid_t start_server_process(int arg2, bool usePoll = false)
102 {
103 int ret;
104 pid_t pid;
105 status_t status;
106 int pipefd[2];
107 char stri[16];
108 char strpipefd1[16];
109 char usepoll[2];
110 char *childargv[] = {
111 binderservername,
112 binderserverarg,
113 stri,
114 strpipefd1,
115 usepoll,
116 binderserversuffix,
117 nullptr
118 };
119
120 ret = pipe(pipefd);
121 if (ret < 0)
122 return ret;
123
124 snprintf(stri, sizeof(stri), "%d", arg2);
125 snprintf(strpipefd1, sizeof(strpipefd1), "%d", pipefd[1]);
126 snprintf(usepoll, sizeof(usepoll), "%d", usePoll ? 1 : 0);
127
128 pid = fork();
129 if (pid == -1)
130 return pid;
131 if (pid == 0) {
132 prctl(PR_SET_PDEATHSIG, SIGHUP);
133 close(pipefd[0]);
134 execv(binderservername, childargv);
135 status = -errno;
136 write(pipefd[1], &status, sizeof(status));
137 fprintf(stderr, "execv failed, %s\n", strerror(errno));
138 _exit(EXIT_FAILURE);
139 }
140 close(pipefd[1]);
141 ret = read(pipefd[0], &status, sizeof(status));
142 //printf("pipe read returned %d, status %d\n", ret, status);
143 close(pipefd[0]);
144 if (ret == sizeof(status)) {
145 ret = status;
146 } else {
147 kill(pid, SIGKILL);
148 if (ret >= 0) {
149 ret = NO_INIT;
150 }
151 }
152 if (ret < 0) {
153 wait(nullptr);
154 return ret;
155 }
156 return pid;
157 }
158
// gtest Environment that starts the main test server process once for the
// whole suite and shuts it down (and reaps it) afterwards.
class BinderLibTestEnv : public ::testing::Environment {
    public:
        BinderLibTestEnv() {}
        // Binder to the server started in SetUp(); shared by every test.
        sp<IBinder> getServer(void) {
            return m_server;
        }

    private:
        virtual void SetUp() {
            m_serverpid = start_server_process(0);
            //printf("m_serverpid %d\n", m_serverpid);
            ASSERT_GT(m_serverpid, 0);

            // The spawned server registers itself with servicemanager under
            // binderLibTestServiceName; fetch its binder here.
            sp<IServiceManager> sm = defaultServiceManager();
            //printf("%s: pid %d, get service\n", __func__, m_pid);
            m_server = sm->getService(binderLibTestServiceName);
            ASSERT_TRUE(m_server != nullptr);
            //printf("%s: pid %d, get service done\n", __func__, m_pid);
        }
        virtual void TearDown() {
            status_t ret;
            Parcel data, reply;
            int exitStatus;
            pid_t pid;

            //printf("%s: pid %d\n", __func__, m_pid);
            if (m_server != nullptr) {
                // Check the server is still responsive, then ask it to exit
                // (oneway, so we don't block on a dying process).
                ret = m_server->transact(BINDER_LIB_TEST_GET_STATUS_TRANSACTION, data, &reply);
                EXPECT_EQ(0, ret);
                ret = m_server->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
                EXPECT_EQ(0, ret);
            }
            if (m_serverpid > 0) {
                // Reap the server and require a clean, zero-status exit.
                //printf("wait for %d\n", m_pids[i]);
                pid = wait(&exitStatus);
                EXPECT_EQ(m_serverpid, pid);
                EXPECT_TRUE(WIFEXITED(exitStatus));
                EXPECT_EQ(0, WEXITSTATUS(exitStatus));
            }
        }

        pid_t m_serverpid;
        sp<IBinder> m_server;
};
203
// Fixture for all binder tests: grabs the shared server binder created by
// BinderLibTestEnv and provides helpers for spawning extra server processes.
class BinderLibTest : public ::testing::Test {
    public:
        virtual void SetUp() {
            m_server = static_cast<BinderLibTestEnv *>(binder_env)->getServer();
            // Reset any work source a previous test may have left behind.
            IPCThreadState::self()->restoreCallingWorkSource(0);
        }
        virtual void TearDown() {
        }
    protected:
        // Asks the main server (via |code|) to spawn another server process
        // and returns its binder; *idPtr receives the new server's id.
        sp<IBinder> addServerEtc(int32_t *idPtr, int code)
        {
            int32_t id;
            Parcel data, reply;
            sp<IBinder> binder;

            EXPECT_THAT(m_server->transact(code, data, &reply), StatusEq(NO_ERROR));

            // Sanity check: |binder| must still be unset before the read so
            // the following EXPECT_TRUE proves readStrongBinder() set it.
            EXPECT_FALSE(binder != nullptr);
            binder = reply.readStrongBinder();
            EXPECT_TRUE(binder != nullptr);
            EXPECT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
            if (idPtr)
                *idPtr = id;
            return binder;
        }

        // Spawns a thread-pool-based helper server.
        sp<IBinder> addServer(int32_t *idPtr = nullptr)
        {
            return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_SERVER);
        }

        // Spawns a poll()-driven helper server (single-threaded loop).
        sp<IBinder> addPollServer(int32_t *idPtr = nullptr)
        {
            return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_POLL_SERVER);
        }

        // Polls |fd| for readability, failing the test on timeout/error.
        void waitForReadData(int fd, int timeout_ms) {
            int ret;
            pollfd pfd = pollfd();

            pfd.fd = fd;
            pfd.events = POLLIN;
            ret = poll(&pfd, 1, timeout_ms);
            EXPECT_EQ(1, ret);
        }

        sp<IBinder> m_server;
};
252
253 class BinderLibTestBundle : public Parcel
254 {
255 public:
BinderLibTestBundle(void)256 BinderLibTestBundle(void) {}
BinderLibTestBundle(const Parcel * source)257 explicit BinderLibTestBundle(const Parcel *source) : m_isValid(false) {
258 int32_t mark;
259 int32_t bundleLen;
260 size_t pos;
261
262 if (source->readInt32(&mark))
263 return;
264 if (mark != MARK_START)
265 return;
266 if (source->readInt32(&bundleLen))
267 return;
268 pos = source->dataPosition();
269 if (Parcel::appendFrom(source, pos, bundleLen))
270 return;
271 source->setDataPosition(pos + bundleLen);
272 if (source->readInt32(&mark))
273 return;
274 if (mark != MARK_END)
275 return;
276 m_isValid = true;
277 setDataPosition(0);
278 }
appendTo(Parcel * dest)279 void appendTo(Parcel *dest) {
280 dest->writeInt32(MARK_START);
281 dest->writeInt32(dataSize());
282 dest->appendFrom(this, 0, dataSize());
283 dest->writeInt32(MARK_END);
284 };
isValid(void)285 bool isValid(void) {
286 return m_isValid;
287 }
288 private:
289 enum {
290 MARK_START = B_PACK_CHARS('B','T','B','S'),
291 MARK_END = B_PACK_CHARS('B','T','B','E'),
292 };
293 bool m_isValid;
294 };
295
296 class BinderLibTestEvent
297 {
298 public:
BinderLibTestEvent(void)299 BinderLibTestEvent(void)
300 : m_eventTriggered(false)
301 {
302 pthread_mutex_init(&m_waitMutex, nullptr);
303 pthread_cond_init(&m_waitCond, nullptr);
304 }
waitEvent(int timeout_s)305 int waitEvent(int timeout_s)
306 {
307 int ret;
308 pthread_mutex_lock(&m_waitMutex);
309 if (!m_eventTriggered) {
310 struct timespec ts;
311 clock_gettime(CLOCK_REALTIME, &ts);
312 ts.tv_sec += timeout_s;
313 pthread_cond_timedwait(&m_waitCond, &m_waitMutex, &ts);
314 }
315 ret = m_eventTriggered ? NO_ERROR : TIMED_OUT;
316 pthread_mutex_unlock(&m_waitMutex);
317 return ret;
318 }
getTriggeringThread()319 pthread_t getTriggeringThread()
320 {
321 return m_triggeringThread;
322 }
323 protected:
triggerEvent(void)324 void triggerEvent(void) {
325 pthread_mutex_lock(&m_waitMutex);
326 pthread_cond_signal(&m_waitCond);
327 m_eventTriggered = true;
328 m_triggeringThread = pthread_self();
329 pthread_mutex_unlock(&m_waitMutex);
330 };
331 private:
332 pthread_mutex_t m_waitMutex;
333 pthread_cond_t m_waitCond;
334 bool m_eventTriggered;
335 pthread_t m_triggeringThread;
336 };
337
// Callback binder handed to servers. Servers either deliver a status
// (BINDER_LIB_TEST_CALL_BACK) or a buffer whose kernel placement we verify
// (BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF). Inherits BinderLibTestEvent so
// tests can block until a callback arrives.
class BinderLibTestCallBack : public BBinder, public BinderLibTestEvent
{
    public:
        BinderLibTestCallBack()
            : m_result(NOT_ENOUGH_DATA)
            , m_prev_end(nullptr)
        {
        }
        // Status carried by the last BINDER_LIB_TEST_CALL_BACK, or
        // NOT_ENOUGH_DATA if none has arrived yet.
        status_t getResult(void)
        {
            return m_result;
        }

    private:
        virtual status_t onTransact(uint32_t code,
                                    const Parcel& data, Parcel* reply,
                                    uint32_t flags = 0)
        {
            (void)reply;
            (void)flags;
            switch(code) {
            case BINDER_LIB_TEST_CALL_BACK: {
                // Record the status the server sent (or the read error).
                status_t status = data.readInt32(&m_result);
                if (status != NO_ERROR) {
                    m_result = status;
                }
                triggerEvent();
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF: {
                sp<IBinder> server;
                int ret;
                const uint8_t *buf = data.data();
                size_t size = data.dataSize();
                // First buffer must be page aligned; each subsequent buffer
                // must start close to where the previous one ended.
                if (m_prev_end) {
                    /* 64-bit kernel needs at most 8 bytes to align buffer end */
                    EXPECT_LE((size_t)(buf - m_prev_end), (size_t)8);
                } else {
                    EXPECT_TRUE(IsPageAligned((void *)buf));
                }

                // End of this transaction's buffer: payload plus the objects
                // offset table (one binder_size_t per object).
                m_prev_end = buf + size + data.objectsCount() * sizeof(binder_size_t);

                if (size > 0) {
                    // Non-empty payload: forward it back through the main
                    // server to produce another VERIFY_BUF round trip.
                    server = static_cast<BinderLibTestEnv *>(binder_env)->getServer();
                    ret = server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION,
                                           data, reply);
                    EXPECT_EQ(NO_ERROR, ret);
                }
                return NO_ERROR;
            }
            default:
                return UNKNOWN_TRANSACTION;
            }
        }

        status_t m_result;
        const uint8_t *m_prev_end;
};
397
398 class TestDeathRecipient : public IBinder::DeathRecipient, public BinderLibTestEvent
399 {
400 private:
binderDied(const wp<IBinder> & who)401 virtual void binderDied(const wp<IBinder>& who) {
402 (void)who;
403 triggerEvent();
404 };
405 };
406
// A plain synchronous no-op transaction must round-trip successfully.
TEST_F(BinderLibTest, NopTransaction) {
    Parcel request, response;
    const status_t result =
            m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, request, &response);
    EXPECT_THAT(result, StatusEq(NO_ERROR));
}
412
// A oneway no-op transaction must be accepted as well.
TEST_F(BinderLibTest, NopTransactionOneway) {
    Parcel request, response;
    const status_t result =
            m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, request, &response, TF_ONE_WAY);
    EXPECT_THAT(result, StatusEq(NO_ERROR));
}
418
TEST_F(BinderLibTest, NopTransactionClear) {
    // make sure it accepts the transaction flag
    Parcel request, response;
    const status_t result =
            m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, request, &response, TF_CLEAR_BUF);
    EXPECT_THAT(result, StatusEq(NO_ERROR));
}
425
// Exercises the BINDER_FREEZE ioctl path: freezes the server process,
// verifies sync transactions fail while frozen and that the driver reports
// which kinds of transactions were attempted, then thaws and re-checks.
TEST_F(BinderLibTest, Freeze) {
    Parcel data, reply, replypid;
    std::ifstream freezer_file("/sys/fs/cgroup/uid_0/cgroup.freeze");

    // Pass test on devices where the cgroup v2 freezer is not supported
    if (freezer_file.fail()) {
        GTEST_SKIP();
        return;
    }

    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_GETPID, data, &replypid), StatusEq(NO_ERROR));
    int32_t pid = replypid.readInt32();
    // Queue a burst of oneway transactions so async work is outstanding
    // when the freeze is requested.
    for (int i = 0; i < 10; i++) {
        EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION_WAIT, data, &reply, TF_ONE_WAY));
    }

    // Pass test on devices where BINDER_FREEZE ioctl is not supported
    int ret = IPCThreadState::self()->freeze(pid, false, 0);
    if (ret != 0) {
        GTEST_SKIP();
        return;
    }

    // With a zero timeout the freeze returns -EAGAIN (presumably because the
    // queued oneway work has not drained yet — confirm against the driver);
    // with a 1000ms timeout it must succeed.
    EXPECT_EQ(-EAGAIN, IPCThreadState::self()->freeze(pid, true, 0));
    EXPECT_EQ(-EAGAIN, IPCThreadState::self()->freeze(pid, true, 0));
    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, true, 1000));
    // Sync transactions to a frozen process must fail.
    EXPECT_EQ(FAILED_TRANSACTION, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));

    bool sync_received, async_received;

    // Both the bool and uint32_t overloads must report: one (blocked) sync
    // transaction seen, no async ones.
    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->getProcessFreezeInfo(pid, &sync_received,
                &async_received));

    EXPECT_EQ(sync_received, 1);
    EXPECT_EQ(async_received, 0);

    uint32_t sync_received2, async_received2;

    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->getProcessFreezeInfo(pid, &sync_received2,
                &async_received2));

    EXPECT_EQ(sync_received2, 1);
    EXPECT_EQ(async_received2, 0);

    // Thaw and verify the server is responsive again.
    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, 0, 0));
    EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));
}
473
// The server echoes back whatever status we ask it to fail with, including
// zero and negative values.
TEST_F(BinderLibTest, SetError) {
    const int32_t kTestValues[] = { 0, -123, 123 };
    for (const int32_t wanted : kTestValues) {
        Parcel request, response;
        request.writeInt32(wanted);
        EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_SET_ERROR_TRANSACTION, request, &response),
                    StatusEq(wanted));
    }
}
483
// The main server reports id 0.
TEST_F(BinderLibTest, GetId) {
    Parcel request, response;
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_GET_ID_TRANSACTION, request, &response),
                StatusEq(NO_ERROR));
    int32_t reportedId;
    EXPECT_THAT(response.readInt32(&reportedId), StatusEq(NO_ERROR));
    EXPECT_EQ(0, reportedId);
}
492
// Queries a helper server for its pointer size and records both the test's
// and the server's sizes as test properties (useful when client and server
// bitness differ).
TEST_F(BinderLibTest, PtrSize) {
    int32_t ptrsize;
    Parcel data, reply;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));
    EXPECT_THAT(reply.readInt32(&ptrsize), StatusEq(NO_ERROR));
    RecordProperty("TestPtrSize", sizeof(void *));
    // Fix: record the size the server reported. Previously sizeof(void *)
    // was recorded again and |ptrsize| was read but never used.
    RecordProperty("ServerPtrSize", ptrsize);
}
504
// Fans a GET_ID request out to three helper servers through one indirect
// transaction and checks each nested reply bundle carries the matching id.
TEST_F(BinderLibTest, IndirectGetId2)
{
    int32_t id;
    int32_t count;
    Parcel data, reply;
    int32_t serverId[3];

    // Request layout: count, then per-target {binder, code, bundle}.
    data.writeInt32(ARRAY_SIZE(serverId));
    for (size_t i = 0; i < ARRAY_SIZE(serverId); i++) {
        sp<IBinder> server;
        BinderLibTestBundle datai;

        server = addServer(&serverId[i]);
        ASSERT_TRUE(server != nullptr);
        data.writeStrongBinder(server);
        data.writeInt32(BINDER_LIB_TEST_GET_ID_TRANSACTION);
        datai.appendTo(&data);
    }

    ASSERT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    // Reply layout: the main server's id (0), count, then one bundle per
    // target containing that target's reply.
    ASSERT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
    EXPECT_EQ(0, id);

    ASSERT_THAT(reply.readInt32(&count), StatusEq(NO_ERROR));
    EXPECT_EQ(ARRAY_SIZE(serverId), (size_t)count);

    for (size_t i = 0; i < (size_t)count; i++) {
        BinderLibTestBundle replyi(&reply);
        EXPECT_TRUE(replyi.isValid());
        EXPECT_THAT(replyi.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(serverId[i], id);
        // Each bundle must be consumed exactly — no trailing bytes.
        EXPECT_EQ(replyi.dataSize(), replyi.dataPosition());
    }

    EXPECT_EQ(reply.dataSize(), reply.dataPosition());
}
543
// Two-level indirection: each helper server is asked to forward a nested
// GET_ID request back to the main server. Checks ids at both nesting levels
// and that every bundle is consumed exactly.
TEST_F(BinderLibTest, IndirectGetId3)
{
    int32_t id;
    int32_t count;
    Parcel data, reply;
    int32_t serverId[3];

    data.writeInt32(ARRAY_SIZE(serverId));
    for (size_t i = 0; i < ARRAY_SIZE(serverId); i++) {
        sp<IBinder> server;
        BinderLibTestBundle datai;
        BinderLibTestBundle datai2;

        server = addServer(&serverId[i]);
        ASSERT_TRUE(server != nullptr);
        data.writeStrongBinder(server);
        data.writeInt32(BINDER_LIB_TEST_INDIRECT_TRANSACTION);

        // Inner payload: the helper should call GET_ID on the main server.
        datai.writeInt32(1);
        datai.writeStrongBinder(m_server);
        datai.writeInt32(BINDER_LIB_TEST_GET_ID_TRANSACTION);
        datai2.appendTo(&datai);

        datai.appendTo(&data);
    }

    ASSERT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    // Outer reply: main server id (0) plus one bundle per helper.
    ASSERT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
    EXPECT_EQ(0, id);

    ASSERT_THAT(reply.readInt32(&count), StatusEq(NO_ERROR));
    EXPECT_EQ(ARRAY_SIZE(serverId), (size_t)count);

    for (size_t i = 0; i < (size_t)count; i++) {
        int32_t counti;

        BinderLibTestBundle replyi(&reply);
        EXPECT_TRUE(replyi.isValid());
        EXPECT_THAT(replyi.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(serverId[i], id);

        ASSERT_THAT(replyi.readInt32(&counti), StatusEq(NO_ERROR));
        EXPECT_EQ(1, counti);

        // Nested reply produced by the main server: id 0, fully consumed.
        BinderLibTestBundle replyi2(&replyi);
        EXPECT_TRUE(replyi2.isValid());
        EXPECT_THAT(replyi2.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(0, id);
        EXPECT_EQ(replyi2.dataSize(), replyi2.dataPosition());

        EXPECT_EQ(replyi.dataSize(), replyi.dataPosition());
    }

    EXPECT_EQ(reply.dataSize(), reply.dataPosition());
}
601
// A oneway request carrying a callback binder: the server must invoke the
// callback with NO_ERROR.
TEST_F(BinderLibTest, CallBack)
{
    sp<BinderLibTestCallBack> callback = new BinderLibTestCallBack();
    Parcel request, response;
    request.writeStrongBinder(callback);
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_CALL_BACK, request, &response, TF_ONE_WAY),
                StatusEq(NO_ERROR));
    EXPECT_THAT(callback->waitEvent(5), StatusEq(NO_ERROR));
    EXPECT_THAT(callback->getResult(), StatusEq(NO_ERROR));
}
612
// Spawning a helper server process must succeed and yield a live binder.
TEST_F(BinderLibTest, AddServer)
{
    sp<IBinder> helper = addServer();
    ASSERT_TRUE(helper != nullptr);
}
618
// Links a death recipient to a helper server, makes the server exit, and
// verifies the notification fires; unlinking afterwards reports DEAD_OBJECT.
TEST_F(BinderLibTest, DeathNotificationStrongRef)
{
    sp<IBinder> sbinder;

    sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();

    {
        sp<IBinder> binder = addServer();
        ASSERT_TRUE(binder != nullptr);
        EXPECT_THAT(binder->linkToDeath(testDeathRecipient), StatusEq(NO_ERROR));
        // Keep one strong ref alive past this scope.
        sbinder = binder;
    }
    {
        Parcel data, reply;
        EXPECT_THAT(sbinder->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY),
                    StatusEq(OK));
    }
    // Flush the queued oneway command out to the driver before waiting.
    IPCThreadState::self()->flushCommands();
    EXPECT_THAT(testDeathRecipient->waitEvent(5), StatusEq(NO_ERROR));
    // The target is dead, so unlinkToDeath must report DEAD_OBJECT.
    EXPECT_THAT(sbinder->unlinkToDeath(testDeathRecipient), StatusEq(DEAD_OBJECT));
}
640
// Several helper processes link death recipients to one target (reporting
// through callbacks), while "passive" processes merely hold strong refs.
// When the target exits, every linked client must see the notification.
TEST_F(BinderLibTest, DeathNotificationMultiple)
{
    status_t ret;
    const int clientcount = 2;
    sp<IBinder> target;
    sp<IBinder> linkedclient[clientcount];
    sp<BinderLibTestCallBack> callBack[clientcount];
    sp<IBinder> passiveclient[clientcount];

    target = addServer();
    ASSERT_TRUE(target != nullptr);
    for (int i = 0; i < clientcount; i++) {
        {
            Parcel data, reply;

            // Ask this client process to linkToDeath() on |target| and
            // report the outcome via callBack[i].
            linkedclient[i] = addServer();
            ASSERT_TRUE(linkedclient[i] != nullptr);
            callBack[i] = new BinderLibTestCallBack();
            data.writeStrongBinder(target);
            data.writeStrongBinder(callBack[i]);
            EXPECT_THAT(linkedclient[i]->transact(BINDER_LIB_TEST_LINK_DEATH_TRANSACTION, data,
                                                  &reply, TF_ONE_WAY),
                        StatusEq(NO_ERROR));
        }
        {
            Parcel data, reply;

            // Passive client just takes an extra strong ref on |target|.
            passiveclient[i] = addServer();
            ASSERT_TRUE(passiveclient[i] != nullptr);
            data.writeStrongBinder(target);
            EXPECT_THAT(passiveclient[i]->transact(BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION, data,
                                                   &reply, TF_ONE_WAY),
                        StatusEq(NO_ERROR));
        }
    }
    {
        Parcel data, reply;
        ret = target->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
        EXPECT_EQ(0, ret);
    }

    // All linked clients must have received the death notification.
    for (int i = 0; i < clientcount; i++) {
        EXPECT_THAT(callBack[i]->waitEvent(5), StatusEq(NO_ERROR));
        EXPECT_THAT(callBack[i]->getResult(), StatusEq(NO_ERROR));
    }
}
687
// Verifies death notifications for a binder that was already dead when it
// was received, and that notifications are dispatched to the thread pool
// rather than the registering thread (see detailed comment below).
TEST_F(BinderLibTest, DeathNotificationThread)
{
    status_t ret;
    sp<BinderLibTestCallBack> callback;
    sp<IBinder> target = addServer();
    ASSERT_TRUE(target != nullptr);
    sp<IBinder> client = addServer();
    ASSERT_TRUE(client != nullptr);

    sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();

    EXPECT_THAT(target->linkToDeath(testDeathRecipient), StatusEq(NO_ERROR));

    {
        Parcel data, reply;
        ret = target->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
        EXPECT_EQ(0, ret);
    }

    /* Make sure it's dead */
    testDeathRecipient->waitEvent(5);

    /* Now, pass the ref to another process and ask that process to
     * call linkToDeath() on it, and wait for a response. This tests
     * two things:
     * 1) You still get death notifications when calling linkToDeath()
     *    on a ref that is already dead when it was passed to you.
     * 2) That death notifications are not directly pushed to the thread
     *    registering them, but to the threadpool (proc workqueue) instead.
     *
     * 2) is tested because the thread handling BINDER_LIB_TEST_DEATH_TRANSACTION
     * is blocked on a condition variable waiting for the death notification to be
     * called; therefore, that thread is not available for handling proc work.
     * So, if the death notification was pushed to the thread workqueue, the callback
     * would never be called, and the test would timeout and fail.
     *
     * Note that we can't do this part of the test from this thread itself, because
     * the binder driver would only push death notifications to the thread if
     * it is a looper thread, which this thread is not.
     *
     * See b/23525545 for details.
     */
    {
        Parcel data, reply;

        callback = new BinderLibTestCallBack();
        data.writeStrongBinder(target);
        data.writeStrongBinder(callback);
        EXPECT_THAT(client->transact(BINDER_LIB_TEST_LINK_DEATH_TRANSACTION, data, &reply,
                                     TF_ONE_WAY),
                    StatusEq(NO_ERROR));
    }

    EXPECT_THAT(callback->waitEvent(5), StatusEq(NO_ERROR));
    EXPECT_THAT(callback->getResult(), StatusEq(NO_ERROR));
}
744
// Passes the write end of a pipe to the server as a raw fd and has the
// server write one byte; verifies the byte arrives and that the server's
// copy of the fd gets closed afterwards (EOF on the read end).
TEST_F(BinderLibTest, PassFile) {
    int ret;
    int pipefd[2];
    uint8_t buf[1] = { 0 };
    uint8_t write_value = 123;

    ret = pipe2(pipefd, O_NONBLOCK);
    ASSERT_EQ(0, ret);

    {
        Parcel data, reply;
        uint8_t writebuf[1] = { write_value };

        // Second arg 'true': the parcel takes ownership of our write end,
        // so we don't close pipefd[1] ourselves below.
        EXPECT_THAT(data.writeFileDescriptor(pipefd[1], true), StatusEq(NO_ERROR));

        EXPECT_THAT(data.writeInt32(sizeof(writebuf)), StatusEq(NO_ERROR));

        EXPECT_THAT(data.write(writebuf, sizeof(writebuf)), StatusEq(NO_ERROR));

        EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_WRITE_FILE_TRANSACTION, data, &reply),
                    StatusEq(NO_ERROR));
    }

    // The server must have written exactly the byte we asked for.
    ret = read(pipefd[0], buf, sizeof(buf));
    EXPECT_EQ(sizeof(buf), (size_t)ret);
    EXPECT_EQ(write_value, buf[0]);

    waitForReadData(pipefd[0], 5000); /* wait for other proccess to close pipe */

    // All write ends closed now, so read() must report EOF.
    ret = read(pipefd[0], buf, sizeof(buf));
    EXPECT_EQ(0, ret);

    close(pipefd[0]);
}
779
// Like PassFile but transfers the fd as a ParcelFileDescriptor object and a
// larger payload; verifies the content round-trips and EOF follows.
TEST_F(BinderLibTest, PassParcelFileDescriptor) {
    const int datasize = 123;
    std::vector<uint8_t> writebuf(datasize);
    for (size_t i = 0; i < writebuf.size(); ++i) {
        writebuf[i] = i;
    }

    android::base::unique_fd read_end, write_end;
    {
        int pipefd[2];
        ASSERT_EQ(0, pipe2(pipefd, O_NONBLOCK));
        read_end.reset(pipefd[0]);
        write_end.reset(pipefd[1]);
    }
    {
        Parcel data;
        // The parcel dups the fd; drop our copy so the receiver ends up
        // holding the only remaining write end.
        EXPECT_EQ(NO_ERROR, data.writeDupParcelFileDescriptor(write_end.get()));
        write_end.reset();
        EXPECT_EQ(NO_ERROR, data.writeInt32(datasize));
        EXPECT_EQ(NO_ERROR, data.write(writebuf.data(), datasize));

        Parcel reply;
        EXPECT_EQ(NO_ERROR,
                  m_server->transact(BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION, data,
                                     &reply));
    }
    // The server must have written the payload back through the pipe.
    std::vector<uint8_t> readbuf(datasize);
    EXPECT_EQ(datasize, read(read_end.get(), readbuf.data(), datasize));
    EXPECT_EQ(writebuf, readbuf);

    waitForReadData(read_end.get(), 5000); /* wait for other proccess to close pipe */

    // EOF: every write end has been closed.
    EXPECT_EQ(0, read(read_end.get(), readbuf.data(), datasize));
}
814
// A weak pointer to a live local binder promotes to the same strong ref;
// once every strong ref is gone, promotion must fail.
TEST_F(BinderLibTest, PromoteLocal) {
    sp<IBinder> strongRef = new BBinder();
    wp<IBinder> weakRef = strongRef;
    sp<IBinder> promoted = weakRef.promote();
    EXPECT_TRUE(strongRef != nullptr);
    EXPECT_EQ(strongRef, promoted);
    // Drop both strong references before re-promoting.
    promoted = nullptr;
    strongRef = nullptr;
    promoted = weakRef.promote();
    EXPECT_TRUE(promoted == nullptr);
}
826
// setExtension()/getExtension() round-trip on a local BBinder.
TEST_F(BinderLibTest, LocalGetExtension) {
    sp<BBinder> base = new BBinder();
    sp<IBinder> attached = new BBinder();
    base->setExtension(attached);
    EXPECT_EQ(attached, base->getExtension());
}
833
// A helper server's extension binder must be retrievable and pingable.
TEST_F(BinderLibTest, RemoteGetExtension) {
    sp<IBinder> helper = addServer();
    ASSERT_TRUE(helper != nullptr);

    sp<IBinder> ext;
    EXPECT_EQ(NO_ERROR, helper->getExtension(&ext));
    ASSERT_NE(nullptr, ext.get());

    EXPECT_EQ(NO_ERROR, ext->pingBinder());
}
844
// Asks the server for its own binder and inspects the raw flat_binder_object
// in the reply: it must be a handle whose cookie is zero and whose binder
// field has zero high bits.
TEST_F(BinderLibTest, CheckHandleZeroBinderHighBitsZeroCookie) {
    Parcel data, reply;

    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_GET_SELF_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    const flat_binder_object *fb = reply.readObject(false);
    ASSERT_TRUE(fb != nullptr);
    EXPECT_EQ(BINDER_TYPE_HANDLE, fb->hdr.type);
    // The handle must resolve back to the same proxy we already hold.
    EXPECT_EQ(m_server, ProcessState::self()->getStrongProxyForHandle(fb->handle));
    EXPECT_EQ((binder_uintptr_t)0, fb->cookie);
    EXPECT_EQ((uint64_t)0, (uint64_t)fb->binder >> 32);
}
858
// Regression test: a transaction referencing the handle of an already-freed
// binder must be rejected with FAILED_TRANSACTION instead of crashing the
// target process.
TEST_F(BinderLibTest, FreedBinder) {
    status_t ret;

    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    __u32 freedHandle;
    wp<IBinder> keepFreedBinder;
    {
        Parcel data, reply;
        ASSERT_THAT(server->transact(BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION, data, &reply),
                    StatusEq(NO_ERROR));
        // Grab the raw handle straight out of the reply buffer.
        struct flat_binder_object *freed = (struct flat_binder_object *)(reply.data());
        freedHandle = freed->handle;
        /* Add a weak ref to the freed binder so the driver does not
         * delete its reference to it - otherwise the transaction
         * fails regardless of whether the driver is fixed.
         */
        keepFreedBinder = reply.readStrongBinder();
    }
    // Let the strong ref drop reach the driver before reusing the handle.
    IPCThreadState::self()->flushCommands();
    {
        Parcel data, reply;
        data.writeStrongBinder(server);
        /* Replace original handle with handle to the freed binder */
        struct flat_binder_object *strong = (struct flat_binder_object *)(data.data());
        __u32 oldHandle = strong->handle;
        strong->handle = freedHandle;
        ret = server->transact(BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION, data, &reply);
        /* Returns DEAD_OBJECT (-32) if target crashes and
         * FAILED_TRANSACTION if the driver rejects the invalid
         * object.
         */
        EXPECT_EQ((status_t)FAILED_TRANSACTION, ret);
        /* Restore original handle so parcel destructor does not use
         * the wrong handle.
         */
        strong->handle = oldHandle;
    }
}
899
// Builds a chain of nested VERIFY_BUF callbacks; BinderLibTestCallBack then
// checks where the kernel places successive transaction buffers in the
// receiving process (first page-aligned, later ones adjacent).
TEST_F(BinderLibTest, CheckNoHeaderMappedInUser) {
    Parcel data, reply;
    sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
    for (int i = 0; i < 2; i++) {
        // Wrap the previous payload one bundle level deeper each pass.
        BinderLibTestBundle datai;
        datai.appendFrom(&data, 0, data.dataSize());

        data.freeData();
        data.writeInt32(1);
        data.writeStrongBinder(callBack);
        data.writeInt32(BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF);

        datai.appendTo(&data);
    }
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));
}
917
// Two oneway transactions to a single-threaded poll server must be executed
// in submission order even when the first one is still in flight.
TEST_F(BinderLibTest, OnewayQueueing)
{
    Parcel data, data2;

    sp<IBinder> pollServer = addPollServer();

    sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
    data.writeStrongBinder(callBack);
    data.writeInt32(500000); // delay in us before calling back

    sp<BinderLibTestCallBack> callBack2 = new BinderLibTestCallBack();
    data2.writeStrongBinder(callBack2);
    data2.writeInt32(0); // delay in us

    EXPECT_THAT(pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data, nullptr, TF_ONE_WAY),
                StatusEq(NO_ERROR));

    // The delay ensures that this second transaction will end up on the async_todo list
    // (for a single-threaded server)
    EXPECT_THAT(pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data2, nullptr, TF_ONE_WAY),
                StatusEq(NO_ERROR));

    // The server will ensure that the two transactions are handled in the expected order;
    // If the ordering is not as expected, an error will be returned through the callbacks.
    EXPECT_THAT(callBack->waitEvent(2), StatusEq(NO_ERROR));
    EXPECT_THAT(callBack->getResult(), StatusEq(NO_ERROR));

    EXPECT_THAT(callBack2->waitEvent(2), StatusEq(NO_ERROR));
    EXPECT_THAT(callBack2->getResult(), StatusEq(NO_ERROR));
}
948
// With no work source set, the server must observe -1.
TEST_F(BinderLibTest, WorkSourceUnsetByDefault)
{
    Parcel request, response;
    request.writeInterfaceToken(binderLibTestServiceName);
    const status_t result =
            m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, request, &response);
    EXPECT_EQ(-1, response.readInt32());
    EXPECT_EQ(NO_ERROR, result);
}
958
// Setting a work source uid makes the server observe it; the setter returns
// the previous (unset) source and enables propagation.
TEST_F(BinderLibTest, WorkSourceSet)
{
    IPCThreadState* ipc = IPCThreadState::self();
    ipc->clearCallingWorkSource();
    const int64_t previousWorkSource = ipc->setCallingWorkSourceUid(100);

    Parcel request, response;
    request.writeInterfaceToken(binderLibTestServiceName);
    const status_t result =
            m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, request, &response);

    EXPECT_EQ(100, response.readInt32());
    EXPECT_EQ(-1, previousWorkSource);
    EXPECT_EQ(true, ipc->shouldPropagateWorkSource());
    EXPECT_EQ(NO_ERROR, result);
}
972
// A work source set without propagation must not reach the server.
TEST_F(BinderLibTest, WorkSourceSetWithoutPropagation)
{
    IPCThreadState* ipc = IPCThreadState::self();
    ipc->setCallingWorkSourceUidWithoutPropagation(100);
    EXPECT_EQ(false, ipc->shouldPropagateWorkSource());

    Parcel request, response;
    request.writeInterfaceToken(binderLibTestServiceName);
    const status_t result =
            m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, request, &response);

    EXPECT_EQ(-1, response.readInt32());
    EXPECT_EQ(false, ipc->shouldPropagateWorkSource());
    EXPECT_EQ(NO_ERROR, result);
}
987
TEST_F(BinderLibTest,WorkSourceCleared)988 TEST_F(BinderLibTest, WorkSourceCleared)
989 {
990 status_t ret;
991 Parcel data, reply;
992
993 IPCThreadState::self()->setCallingWorkSourceUid(100);
994 int64_t token = IPCThreadState::self()->clearCallingWorkSource();
995 int32_t previousWorkSource = (int32_t)token;
996 data.writeInterfaceToken(binderLibTestServiceName);
997 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
998
999 EXPECT_EQ(-1, reply.readInt32());
1000 EXPECT_EQ(100, previousWorkSource);
1001 EXPECT_EQ(NO_ERROR, ret);
1002 }
1003
TEST_F(BinderLibTest,WorkSourceRestored)1004 TEST_F(BinderLibTest, WorkSourceRestored)
1005 {
1006 status_t ret;
1007 Parcel data, reply;
1008
1009 IPCThreadState::self()->setCallingWorkSourceUid(100);
1010 int64_t token = IPCThreadState::self()->clearCallingWorkSource();
1011 IPCThreadState::self()->restoreCallingWorkSource(token);
1012
1013 data.writeInterfaceToken(binderLibTestServiceName);
1014 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
1015
1016 EXPECT_EQ(100, reply.readInt32());
1017 EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
1018 EXPECT_EQ(NO_ERROR, ret);
1019 }
1020
TEST_F(BinderLibTest,PropagateFlagSet)1021 TEST_F(BinderLibTest, PropagateFlagSet)
1022 {
1023 IPCThreadState::self()->clearPropagateWorkSource();
1024 IPCThreadState::self()->setCallingWorkSourceUid(100);
1025 EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
1026 }
1027
TEST_F(BinderLibTest,PropagateFlagCleared)1028 TEST_F(BinderLibTest, PropagateFlagCleared)
1029 {
1030 IPCThreadState::self()->setCallingWorkSourceUid(100);
1031 IPCThreadState::self()->clearPropagateWorkSource();
1032 EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
1033 }
1034
TEST_F(BinderLibTest,PropagateFlagRestored)1035 TEST_F(BinderLibTest, PropagateFlagRestored)
1036 {
1037 int token = IPCThreadState::self()->setCallingWorkSourceUid(100);
1038 IPCThreadState::self()->restoreCallingWorkSource(token);
1039
1040 EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
1041 }
1042
TEST_F(BinderLibTest,WorkSourcePropagatedForAllFollowingBinderCalls)1043 TEST_F(BinderLibTest, WorkSourcePropagatedForAllFollowingBinderCalls)
1044 {
1045 IPCThreadState::self()->setCallingWorkSourceUid(100);
1046
1047 Parcel data, reply;
1048 status_t ret;
1049 data.writeInterfaceToken(binderLibTestServiceName);
1050 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
1051
1052 Parcel data2, reply2;
1053 status_t ret2;
1054 data2.writeInterfaceToken(binderLibTestServiceName);
1055 ret2 = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data2, &reply2);
1056 EXPECT_EQ(100, reply2.readInt32());
1057 EXPECT_EQ(NO_ERROR, ret2);
1058 }
1059
TEST_F(BinderLibTest,SchedPolicySet)1060 TEST_F(BinderLibTest, SchedPolicySet) {
1061 sp<IBinder> server = addServer();
1062 ASSERT_TRUE(server != nullptr);
1063
1064 Parcel data, reply;
1065 EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply),
1066 StatusEq(NO_ERROR));
1067
1068 int policy = reply.readInt32();
1069 int priority = reply.readInt32();
1070
1071 EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
1072 EXPECT_EQ(kSchedPriority, priority);
1073 }
1074
TEST_F(BinderLibTest,InheritRt)1075 TEST_F(BinderLibTest, InheritRt) {
1076 sp<IBinder> server = addServer();
1077 ASSERT_TRUE(server != nullptr);
1078
1079 const struct sched_param param {
1080 .sched_priority = kSchedPriorityMore,
1081 };
1082 EXPECT_EQ(0, sched_setscheduler(getpid(), SCHED_RR, ¶m));
1083
1084 Parcel data, reply;
1085 EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply),
1086 StatusEq(NO_ERROR));
1087
1088 int policy = reply.readInt32();
1089 int priority = reply.readInt32();
1090
1091 EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
1092 EXPECT_EQ(kSchedPriorityMore, priority);
1093 }
1094
TEST_F(BinderLibTest,VectorSent)1095 TEST_F(BinderLibTest, VectorSent) {
1096 Parcel data, reply;
1097 sp<IBinder> server = addServer();
1098 ASSERT_TRUE(server != nullptr);
1099
1100 std::vector<uint64_t> const testValue = { std::numeric_limits<uint64_t>::max(), 0, 200 };
1101 data.writeUint64Vector(testValue);
1102
1103 EXPECT_THAT(server->transact(BINDER_LIB_TEST_ECHO_VECTOR, data, &reply), StatusEq(NO_ERROR));
1104 std::vector<uint64_t> readValue;
1105 EXPECT_THAT(reply.readUint64Vector(&readValue), StatusEq(OK));
1106 EXPECT_EQ(readValue, testValue);
1107 }
1108
TEST_F(BinderLibTest,BufRejected)1109 TEST_F(BinderLibTest, BufRejected) {
1110 Parcel data, reply;
1111 uint32_t buf;
1112 sp<IBinder> server = addServer();
1113 ASSERT_TRUE(server != nullptr);
1114
1115 binder_buffer_object obj {
1116 .hdr = { .type = BINDER_TYPE_PTR },
1117 .flags = 0,
1118 .buffer = reinterpret_cast<binder_uintptr_t>((void*)&buf),
1119 .length = 4,
1120 };
1121 data.setDataCapacity(1024);
1122 // Write a bogus object at offset 0 to get an entry in the offset table
1123 data.writeFileDescriptor(0);
1124 EXPECT_EQ(data.objectsCount(), 1);
1125 uint8_t *parcelData = const_cast<uint8_t*>(data.data());
1126 // And now, overwrite it with the buffer object
1127 memcpy(parcelData, &obj, sizeof(obj));
1128 data.setDataSize(sizeof(obj));
1129
1130 EXPECT_EQ(data.objectsCount(), 1);
1131
1132 // Either the kernel should reject this transaction (if it's correct), but
1133 // if it's not, the server implementation should return an error if it
1134 // finds an object in the received Parcel.
1135 EXPECT_THAT(server->transact(BINDER_LIB_TEST_REJECT_OBJECTS, data, &reply),
1136 Not(StatusEq(NO_ERROR)));
1137 }
1138
TEST_F(BinderLibTest,WeakRejected)1139 TEST_F(BinderLibTest, WeakRejected) {
1140 Parcel data, reply;
1141 sp<IBinder> server = addServer();
1142 ASSERT_TRUE(server != nullptr);
1143
1144 auto binder = sp<BBinder>::make();
1145 wp<BBinder> wpBinder(binder);
1146 flat_binder_object obj{
1147 .hdr = {.type = BINDER_TYPE_WEAK_BINDER},
1148 .flags = 0,
1149 .binder = reinterpret_cast<uintptr_t>(wpBinder.get_refs()),
1150 .cookie = reinterpret_cast<uintptr_t>(wpBinder.unsafe_get()),
1151 };
1152 data.setDataCapacity(1024);
1153 // Write a bogus object at offset 0 to get an entry in the offset table
1154 data.writeFileDescriptor(0);
1155 EXPECT_EQ(data.objectsCount(), 1);
1156 uint8_t *parcelData = const_cast<uint8_t *>(data.data());
1157 // And now, overwrite it with the weak binder
1158 memcpy(parcelData, &obj, sizeof(obj));
1159 data.setDataSize(sizeof(obj));
1160
1161 // a previous bug caused other objects to be released an extra time, so we
1162 // test with an object that libbinder will actually try to release
1163 EXPECT_EQ(OK, data.writeStrongBinder(sp<BBinder>::make()));
1164
1165 EXPECT_EQ(data.objectsCount(), 2);
1166
1167 // send it many times, since previous error was memory corruption, make it
1168 // more likely that the server crashes
1169 for (size_t i = 0; i < 100; i++) {
1170 EXPECT_THAT(server->transact(BINDER_LIB_TEST_REJECT_OBJECTS, data, &reply),
1171 StatusEq(BAD_VALUE));
1172 }
1173
1174 EXPECT_THAT(server->pingBinder(), StatusEq(NO_ERROR));
1175 }
1176
TEST_F(BinderLibTest,GotSid)1177 TEST_F(BinderLibTest, GotSid) {
1178 sp<IBinder> server = addServer();
1179
1180 Parcel data;
1181 EXPECT_THAT(server->transact(BINDER_LIB_TEST_CAN_GET_SID, data, nullptr), StatusEq(OK));
1182 }
1183
1184 class BinderLibTestService : public BBinder
1185 {
1186 public:
BinderLibTestService(int32_t id)1187 explicit BinderLibTestService(int32_t id)
1188 : m_id(id)
1189 , m_nextServerId(id + 1)
1190 , m_serverStartRequested(false)
1191 , m_callback(nullptr)
1192 {
1193 pthread_mutex_init(&m_serverWaitMutex, nullptr);
1194 pthread_cond_init(&m_serverWaitCond, nullptr);
1195 }
~BinderLibTestService()1196 ~BinderLibTestService()
1197 {
1198 exit(EXIT_SUCCESS);
1199 }
1200
processPendingCall()1201 void processPendingCall() {
1202 if (m_callback != nullptr) {
1203 Parcel data;
1204 data.writeInt32(NO_ERROR);
1205 m_callback->transact(BINDER_LIB_TEST_CALL_BACK, data, nullptr, TF_ONE_WAY);
1206 m_callback = nullptr;
1207 }
1208 }
1209
onTransact(uint32_t code,const Parcel & data,Parcel * reply,uint32_t flags=0)1210 virtual status_t onTransact(uint32_t code,
1211 const Parcel& data, Parcel* reply,
1212 uint32_t flags = 0) {
1213 if (getuid() != (uid_t)IPCThreadState::self()->getCallingUid()) {
1214 return PERMISSION_DENIED;
1215 }
1216 switch (code) {
1217 case BINDER_LIB_TEST_REGISTER_SERVER: {
1218 int32_t id;
1219 sp<IBinder> binder;
1220 id = data.readInt32();
1221 binder = data.readStrongBinder();
1222 if (binder == nullptr) {
1223 return BAD_VALUE;
1224 }
1225
1226 if (m_id != 0)
1227 return INVALID_OPERATION;
1228
1229 pthread_mutex_lock(&m_serverWaitMutex);
1230 if (m_serverStartRequested) {
1231 m_serverStartRequested = false;
1232 m_serverStarted = binder;
1233 pthread_cond_signal(&m_serverWaitCond);
1234 }
1235 pthread_mutex_unlock(&m_serverWaitMutex);
1236 return NO_ERROR;
1237 }
1238 case BINDER_LIB_TEST_ADD_POLL_SERVER:
1239 case BINDER_LIB_TEST_ADD_SERVER: {
1240 int ret;
1241 int serverid;
1242
1243 if (m_id != 0) {
1244 return INVALID_OPERATION;
1245 }
1246 pthread_mutex_lock(&m_serverWaitMutex);
1247 if (m_serverStartRequested) {
1248 ret = -EBUSY;
1249 } else {
1250 serverid = m_nextServerId++;
1251 m_serverStartRequested = true;
1252 bool usePoll = code == BINDER_LIB_TEST_ADD_POLL_SERVER;
1253
1254 pthread_mutex_unlock(&m_serverWaitMutex);
1255 ret = start_server_process(serverid, usePoll);
1256 pthread_mutex_lock(&m_serverWaitMutex);
1257 }
1258 if (ret > 0) {
1259 if (m_serverStartRequested) {
1260 struct timespec ts;
1261 clock_gettime(CLOCK_REALTIME, &ts);
1262 ts.tv_sec += 5;
1263 ret = pthread_cond_timedwait(&m_serverWaitCond, &m_serverWaitMutex, &ts);
1264 }
1265 if (m_serverStartRequested) {
1266 m_serverStartRequested = false;
1267 ret = -ETIMEDOUT;
1268 } else {
1269 reply->writeStrongBinder(m_serverStarted);
1270 reply->writeInt32(serverid);
1271 m_serverStarted = nullptr;
1272 ret = NO_ERROR;
1273 }
1274 } else if (ret >= 0) {
1275 m_serverStartRequested = false;
1276 ret = UNKNOWN_ERROR;
1277 }
1278 pthread_mutex_unlock(&m_serverWaitMutex);
1279 return ret;
1280 }
1281 case BINDER_LIB_TEST_GETPID:
1282 reply->writeInt32(getpid());
1283 return NO_ERROR;
1284 case BINDER_LIB_TEST_NOP_TRANSACTION_WAIT:
1285 usleep(5000);
1286 [[fallthrough]];
1287 case BINDER_LIB_TEST_NOP_TRANSACTION:
1288 // oneway error codes should be ignored
1289 if (flags & TF_ONE_WAY) {
1290 return UNKNOWN_ERROR;
1291 }
1292 return NO_ERROR;
1293 case BINDER_LIB_TEST_DELAYED_CALL_BACK: {
1294 // Note: this transaction is only designed for use with a
1295 // poll() server. See comments around epoll_wait().
1296 if (m_callback != nullptr) {
1297 // A callback was already pending; this means that
1298 // we received a second call while still processing
1299 // the first one. Fail the test.
1300 sp<IBinder> callback = data.readStrongBinder();
1301 Parcel data2;
1302 data2.writeInt32(UNKNOWN_ERROR);
1303
1304 callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, nullptr, TF_ONE_WAY);
1305 } else {
1306 m_callback = data.readStrongBinder();
1307 int32_t delayUs = data.readInt32();
1308 /*
1309 * It's necessary that we sleep here, so the next
1310 * transaction the caller makes will be queued to
1311 * the async queue.
1312 */
1313 usleep(delayUs);
1314
1315 /*
1316 * Now when we return, libbinder will tell the kernel
1317 * we are done with this transaction, and the kernel
1318 * can move the queued transaction to either the
1319 * thread todo worklist (for kernels without the fix),
1320 * or the proc todo worklist. In case of the former,
1321 * the next outbound call will pick up the pending
1322 * transaction, which leads to undesired reentrant
1323 * behavior. This is caught in the if() branch above.
1324 */
1325 }
1326
1327 return NO_ERROR;
1328 }
1329 case BINDER_LIB_TEST_NOP_CALL_BACK: {
1330 Parcel data2, reply2;
1331 sp<IBinder> binder;
1332 binder = data.readStrongBinder();
1333 if (binder == nullptr) {
1334 return BAD_VALUE;
1335 }
1336 data2.writeInt32(NO_ERROR);
1337 binder->transact(BINDER_LIB_TEST_CALL_BACK, data2, &reply2);
1338 return NO_ERROR;
1339 }
1340 case BINDER_LIB_TEST_GET_SELF_TRANSACTION:
1341 reply->writeStrongBinder(this);
1342 return NO_ERROR;
1343 case BINDER_LIB_TEST_GET_ID_TRANSACTION:
1344 reply->writeInt32(m_id);
1345 return NO_ERROR;
1346 case BINDER_LIB_TEST_INDIRECT_TRANSACTION: {
1347 int32_t count;
1348 uint32_t indirect_code;
1349 sp<IBinder> binder;
1350
1351 count = data.readInt32();
1352 reply->writeInt32(m_id);
1353 reply->writeInt32(count);
1354 for (int i = 0; i < count; i++) {
1355 binder = data.readStrongBinder();
1356 if (binder == nullptr) {
1357 return BAD_VALUE;
1358 }
1359 indirect_code = data.readInt32();
1360 BinderLibTestBundle data2(&data);
1361 if (!data2.isValid()) {
1362 return BAD_VALUE;
1363 }
1364 BinderLibTestBundle reply2;
1365 binder->transact(indirect_code, data2, &reply2);
1366 reply2.appendTo(reply);
1367 }
1368 return NO_ERROR;
1369 }
1370 case BINDER_LIB_TEST_SET_ERROR_TRANSACTION:
1371 reply->setError(data.readInt32());
1372 return NO_ERROR;
1373 case BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION:
1374 reply->writeInt32(sizeof(void *));
1375 return NO_ERROR;
1376 case BINDER_LIB_TEST_GET_STATUS_TRANSACTION:
1377 return NO_ERROR;
1378 case BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION:
1379 m_strongRef = data.readStrongBinder();
1380 return NO_ERROR;
1381 case BINDER_LIB_TEST_LINK_DEATH_TRANSACTION: {
1382 int ret;
1383 Parcel data2, reply2;
1384 sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();
1385 sp<IBinder> target;
1386 sp<IBinder> callback;
1387
1388 target = data.readStrongBinder();
1389 if (target == nullptr) {
1390 return BAD_VALUE;
1391 }
1392 callback = data.readStrongBinder();
1393 if (callback == nullptr) {
1394 return BAD_VALUE;
1395 }
1396 ret = target->linkToDeath(testDeathRecipient);
1397 if (ret == NO_ERROR)
1398 ret = testDeathRecipient->waitEvent(5);
1399 data2.writeInt32(ret);
1400 callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, &reply2);
1401 return NO_ERROR;
1402 }
1403 case BINDER_LIB_TEST_WRITE_FILE_TRANSACTION: {
1404 int ret;
1405 int32_t size;
1406 const void *buf;
1407 int fd;
1408
1409 fd = data.readFileDescriptor();
1410 if (fd < 0) {
1411 return BAD_VALUE;
1412 }
1413 ret = data.readInt32(&size);
1414 if (ret != NO_ERROR) {
1415 return ret;
1416 }
1417 buf = data.readInplace(size);
1418 if (buf == nullptr) {
1419 return BAD_VALUE;
1420 }
1421 ret = write(fd, buf, size);
1422 if (ret != size)
1423 return UNKNOWN_ERROR;
1424 return NO_ERROR;
1425 }
1426 case BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION: {
1427 int ret;
1428 int32_t size;
1429 const void *buf;
1430 android::base::unique_fd fd;
1431
1432 ret = data.readUniqueParcelFileDescriptor(&fd);
1433 if (ret != NO_ERROR) {
1434 return ret;
1435 }
1436 ret = data.readInt32(&size);
1437 if (ret != NO_ERROR) {
1438 return ret;
1439 }
1440 buf = data.readInplace(size);
1441 if (buf == nullptr) {
1442 return BAD_VALUE;
1443 }
1444 ret = write(fd.get(), buf, size);
1445 if (ret != size) return UNKNOWN_ERROR;
1446 return NO_ERROR;
1447 }
1448 case BINDER_LIB_TEST_DELAYED_EXIT_TRANSACTION:
1449 alarm(10);
1450 return NO_ERROR;
1451 case BINDER_LIB_TEST_EXIT_TRANSACTION:
1452 while (wait(nullptr) != -1 || errno != ECHILD)
1453 ;
1454 exit(EXIT_SUCCESS);
1455 case BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION: {
1456 sp<IBinder> binder = new BBinder();
1457 reply->writeStrongBinder(binder);
1458 return NO_ERROR;
1459 }
1460 case BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION: {
1461 data.enforceInterface(binderLibTestServiceName);
1462 reply->writeInt32(IPCThreadState::self()->getCallingWorkSourceUid());
1463 return NO_ERROR;
1464 }
1465 case BINDER_LIB_TEST_GET_SCHEDULING_POLICY: {
1466 int policy = 0;
1467 sched_param param;
1468 if (0 != pthread_getschedparam(pthread_self(), &policy, ¶m)) {
1469 return UNKNOWN_ERROR;
1470 }
1471 reply->writeInt32(policy);
1472 reply->writeInt32(param.sched_priority);
1473 return NO_ERROR;
1474 }
1475 case BINDER_LIB_TEST_ECHO_VECTOR: {
1476 std::vector<uint64_t> vector;
1477 auto err = data.readUint64Vector(&vector);
1478 if (err != NO_ERROR)
1479 return err;
1480 reply->writeUint64Vector(vector);
1481 return NO_ERROR;
1482 }
1483 case BINDER_LIB_TEST_REJECT_OBJECTS: {
1484 return data.objectsCount() == 0 ? BAD_VALUE : NO_ERROR;
1485 }
1486 case BINDER_LIB_TEST_CAN_GET_SID: {
1487 return IPCThreadState::self()->getCallingSid() == nullptr ? BAD_VALUE : NO_ERROR;
1488 }
1489 default:
1490 return UNKNOWN_TRANSACTION;
1491 };
1492 }
1493 private:
1494 int32_t m_id;
1495 int32_t m_nextServerId;
1496 pthread_mutex_t m_serverWaitMutex;
1497 pthread_cond_t m_serverWaitCond;
1498 bool m_serverStartRequested;
1499 sp<IBinder> m_serverStarted;
1500 sp<IBinder> m_strongRef;
1501 sp<IBinder> m_callback;
1502 };
1503
run_server(int index,int readypipefd,bool usePoll)1504 int run_server(int index, int readypipefd, bool usePoll)
1505 {
1506 binderLibTestServiceName += String16(binderserversuffix);
1507
1508 status_t ret;
1509 sp<IServiceManager> sm = defaultServiceManager();
1510 BinderLibTestService* testServicePtr;
1511 {
1512 sp<BinderLibTestService> testService = new BinderLibTestService(index);
1513
1514 testService->setMinSchedulerPolicy(kSchedPolicy, kSchedPriority);
1515
1516 testService->setInheritRt(true);
1517
1518 /*
1519 * Normally would also contain functionality as well, but we are only
1520 * testing the extension mechanism.
1521 */
1522 testService->setExtension(new BBinder());
1523
1524 // Required for test "BufRejected'
1525 testService->setRequestingSid(true);
1526
1527 /*
1528 * We need this below, but can't hold a sp<> because it prevents the
1529 * node from being cleaned up automatically. It's safe in this case
1530 * because of how the tests are written.
1531 */
1532 testServicePtr = testService.get();
1533
1534 if (index == 0) {
1535 ret = sm->addService(binderLibTestServiceName, testService);
1536 } else {
1537 sp<IBinder> server = sm->getService(binderLibTestServiceName);
1538 Parcel data, reply;
1539 data.writeInt32(index);
1540 data.writeStrongBinder(testService);
1541
1542 ret = server->transact(BINDER_LIB_TEST_REGISTER_SERVER, data, &reply);
1543 }
1544 }
1545 write(readypipefd, &ret, sizeof(ret));
1546 close(readypipefd);
1547 //printf("%s: ret %d\n", __func__, ret);
1548 if (ret)
1549 return 1;
1550 //printf("%s: joinThreadPool\n", __func__);
1551 if (usePoll) {
1552 int fd;
1553 struct epoll_event ev;
1554 int epoll_fd;
1555 IPCThreadState::self()->setupPolling(&fd);
1556 if (fd < 0) {
1557 return 1;
1558 }
1559 IPCThreadState::self()->flushCommands(); // flush BC_ENTER_LOOPER
1560
1561 epoll_fd = epoll_create1(EPOLL_CLOEXEC);
1562 if (epoll_fd == -1) {
1563 return 1;
1564 }
1565
1566 ev.events = EPOLLIN;
1567 if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev) == -1) {
1568 return 1;
1569 }
1570
1571 while (1) {
1572 /*
1573 * We simulate a single-threaded process using the binder poll
1574 * interface; besides handling binder commands, it can also
1575 * issue outgoing transactions, by storing a callback in
1576 * m_callback.
1577 *
1578 * processPendingCall() will then issue that transaction.
1579 */
1580 struct epoll_event events[1];
1581 int numEvents = epoll_wait(epoll_fd, events, 1, 1000);
1582 if (numEvents < 0) {
1583 if (errno == EINTR) {
1584 continue;
1585 }
1586 return 1;
1587 }
1588 if (numEvents > 0) {
1589 IPCThreadState::self()->handlePolledCommands();
1590 IPCThreadState::self()->flushCommands(); // flush BC_FREE_BUFFER
1591 testServicePtr->processPendingCall();
1592 }
1593 }
1594 } else {
1595 ProcessState::self()->startThreadPool();
1596 IPCThreadState::self()->joinThreadPool();
1597 }
1598 //printf("%s: joinThreadPool returned\n", __func__);
1599 return 1; /* joinThreadPool should not return */
1600 }
1601
main(int argc,char ** argv)1602 int main(int argc, char **argv) {
1603 ExitIfWrongAbi();
1604
1605 if (argc == 4 && !strcmp(argv[1], "--servername")) {
1606 binderservername = argv[2];
1607 } else {
1608 binderservername = argv[0];
1609 }
1610
1611 if (argc == 6 && !strcmp(argv[1], binderserverarg)) {
1612 binderserversuffix = argv[5];
1613 return run_server(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]) == 1);
1614 }
1615 binderserversuffix = new char[16];
1616 snprintf(binderserversuffix, 16, "%d", getpid());
1617 binderLibTestServiceName += String16(binderserversuffix);
1618
1619 ::testing::InitGoogleTest(&argc, argv);
1620 binder_env = AddGlobalTestEnvironment(new BinderLibTestEnv());
1621 ProcessState::self()->startThreadPool();
1622 return RUN_ALL_TESTS();
1623 }
1624