1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17
18 #include <bpf_timeinstate.h>
19
20 #include <sys/sysinfo.h>
21
22 #include <pthread.h>
23 #include <semaphore.h>
24 #include <numeric>
25 #include <unordered_map>
26 #include <vector>
27
28 #include <gtest/gtest.h>
29
30 #include <android-base/unique_fd.h>
31 #include <bpf/BpfMap.h>
32 #include <cputimeinstate.h>
33 #include <libbpf.h>
34
35 namespace android {
36 namespace bpf {
37
// Nanosecond conversion constants used for plausibility bounds in the tests below.
static constexpr uint64_t NSEC_PER_SEC = 1000000000;
// Upper bound for any single counter: no time-in-state value should exceed ~1 year.
static constexpr uint64_t NSEC_PER_YEAR = NSEC_PER_SEC * 60 * 60 * 24 * 365;

using std::vector;
42
TEST(TimeInStateTest, IsTrackingSupported) {
    // Smoke test: the support probe must run without crashing. Its result is
    // deliberately not asserted — availability varies by kernel.
    (void)isTrackingUidTimesSupported();
    SUCCEED();
}
47
TEST(TimeInStateTest, TotalTimeInState) {
    // System-wide per-frequency times must be readable and non-empty.
    const auto totalTimes = getTotalCpuFreqTimes();
    ASSERT_TRUE(totalTimes.has_value());
    EXPECT_FALSE(totalTimes->empty());
}
53
TEST(TimeInStateTest, SingleUidTimeInState) {
    // UID 0 (root) always exists, so its per-frequency times must be present.
    const auto rootTimes = getUidCpuFreqTimes(0);
    ASSERT_TRUE(rootTimes.has_value());
    EXPECT_FALSE(rootTimes->empty());
}
59
TEST(TimeInStateTest, SingleUidConcurrentTimes) {
    const auto ct = getUidConcurrentTimes(0);
    ASSERT_TRUE(ct.has_value());
    ASSERT_FALSE(ct->active.empty());
    ASSERT_FALSE(ct->policy.empty());

    // The per-policy vectors, taken together, must provide exactly as many
    // slots as the active vector does.
    uint64_t totalPolicySlots = 0;
    for (const auto &perPolicy : ct->policy) {
        totalPolicySlots += perPolicy.size();
    }
    ASSERT_EQ(ct->active.size(), totalPolicySlots);
}
70
// Checks that a single concurrent_time_t entry is internally consistent:
// scanning the concurrency levels from either end, the cumulative per-policy
// time must always cover the cumulative "active" time at the same level.
// NOTE(review): the first loop runs up to the widest policy vector and indexes
// concurrentTime.active[i] without a size check — assumes
// active.size() >= maxPolicyCpus; confirm against the producer of this struct.
static void TestConcurrentTimesConsistent(const struct concurrent_time_t &concurrentTime) {
    // Widest per-policy vector determines how many levels the first scan covers.
    size_t maxPolicyCpus = 0;
    for (const auto &vec : concurrentTime.policy) {
        maxPolicyCpus = std::max(maxPolicyCpus, vec.size());
    }
    // Forward scan: carry any surplus policy time to the next level; active
    // time must never exceed the accumulated policy time.
    uint64_t policySum = 0;
    for (size_t i = 0; i < maxPolicyCpus; ++i) {
        for (const auto &vec : concurrentTime.policy) {
            if (i < vec.size()) policySum += vec[i];
        }
        ASSERT_LE(concurrentTime.active[i], policySum);
        policySum -= concurrentTime.active[i];
    }
    // Backward scan: same invariant, aligning vectors at their high ends.
    policySum = 0;
    for (size_t i = 0; i < concurrentTime.active.size(); ++i) {
        for (const auto &vec : concurrentTime.policy) {
            if (i < vec.size()) policySum += vec[vec.size() - 1 - i];
        }
        auto activeSum = concurrentTime.active[concurrentTime.active.size() - 1 - i];
        // This check is slightly flaky because we may read a map entry in the middle of an update
        // when active times have been updated but policy times have not. This happens infrequently
        // and can be distinguished from more serious bugs by re-running the test: if the underlying
        // data itself is inconsistent, the test will fail every time.
        ASSERT_LE(activeSum, policySum);
        policySum -= activeSum;
    }
}
98
TestUidTimesConsistent(const std::vector<std::vector<uint64_t>> & timeInState,const struct concurrent_time_t & concurrentTime)99 static void TestUidTimesConsistent(const std::vector<std::vector<uint64_t>> &timeInState,
100 const struct concurrent_time_t &concurrentTime) {
101 ASSERT_NO_FATAL_FAILURE(TestConcurrentTimesConsistent(concurrentTime));
102 ASSERT_EQ(timeInState.size(), concurrentTime.policy.size());
103 uint64_t policySum = 0;
104 for (uint32_t i = 0; i < timeInState.size(); ++i) {
105 uint64_t tisSum =
106 std::accumulate(timeInState[i].begin(), timeInState[i].end(), (uint64_t)0);
107 uint64_t concurrentSum = std::accumulate(concurrentTime.policy[i].begin(),
108 concurrentTime.policy[i].end(), (uint64_t)0);
109 if (tisSum < concurrentSum)
110 ASSERT_LE(concurrentSum - tisSum, NSEC_PER_SEC);
111 else
112 ASSERT_LE(tisSum - concurrentSum, NSEC_PER_SEC);
113 policySum += concurrentSum;
114 }
115 uint64_t activeSum = std::accumulate(concurrentTime.active.begin(), concurrentTime.active.end(),
116 (uint64_t)0);
117 EXPECT_EQ(activeSum, policySum);
118 }
119
TEST(TimeInStateTest, SingleUidTimesConsistent) {
    // Fetch both views of root's CPU time and cross-check them.
    const auto freqTimes = getUidCpuFreqTimes(0);
    ASSERT_TRUE(freqTimes.has_value());

    const auto concTimes = getUidConcurrentTimes(0);
    ASSERT_TRUE(concTimes.has_value());

    ASSERT_NO_FATAL_FAILURE(TestUidTimesConsistent(*freqTimes, *concTimes));
}
129
TEST(TimeInStateTest, AllUidTimeInState) {
    uint64_t zero = 0;
    // Exercise both the full dump and the updated-since-timestamp variant.
    auto maps = {getUidsCpuFreqTimes(), getUidsUpdatedCpuFreqTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());
        ASSERT_FALSE(map->empty());

        // Record the shape of the first UID's entry: freq slots per policy.
        vector<size_t> expectedSizes;
        const auto &reference = map->begin()->second;
        for (const auto &policyTimes : reference) expectedSizes.push_back(policyTimes.size());

        // Every other UID's entry must have exactly the same shape.
        for (const auto &entry : *map) {
            const auto &times = entry.second;
            ASSERT_EQ(times.size(), expectedSizes.size());
            for (size_t p = 0; p < times.size(); ++p) {
                ASSERT_EQ(times[p].size(), expectedSizes[p]);
            }
        }
    }
}
148
TestCheckUpdate(const std::vector<std::vector<uint64_t>> & before,const std::vector<std::vector<uint64_t>> & after)149 void TestCheckUpdate(const std::vector<std::vector<uint64_t>> &before,
150 const std::vector<std::vector<uint64_t>> &after) {
151 ASSERT_EQ(before.size(), after.size());
152 uint64_t sumBefore = 0, sumAfter = 0;
153 for (size_t i = 0; i < before.size(); ++i) {
154 ASSERT_EQ(before[i].size(), after[i].size());
155 for (size_t j = 0; j < before[i].size(); ++j) {
156 // Times should never decrease
157 ASSERT_LE(before[i][j], after[i][j]);
158 }
159 sumBefore += std::accumulate(before[i].begin(), before[i].end(), (uint64_t)0);
160 sumAfter += std::accumulate(after[i].begin(), after[i].end(), (uint64_t)0);
161 }
162 ASSERT_LE(sumBefore, sumAfter);
163 ASSERT_LE(sumAfter - sumBefore, NSEC_PER_SEC);
164 }
165
TEST(TimeInStateTest, AllUidUpdatedTimeInState) {
    uint64_t lastUpdate = 0;
    auto map1 = getUidsUpdatedCpuFreqTimes(&lastUpdate);
    ASSERT_TRUE(map1.has_value());
    ASSERT_FALSE(map1->empty());
    ASSERT_NE(lastUpdate, (uint64_t)0);
    const uint64_t oldLastUpdate = lastUpdate;

    // Sleep briefly to trigger a context switch, ensuring we see at least one update.
    struct timespec ts = {.tv_sec = 0, .tv_nsec = 1000000};
    nanosleep(&ts, nullptr);

    auto map2 = getUidsUpdatedCpuFreqTimes(&lastUpdate);
    ASSERT_TRUE(map2.has_value());
    ASSERT_FALSE(map2->empty());
    ASSERT_NE(lastUpdate, oldLastUpdate);

    // At least one UID should have been idle across the interval and thus be
    // filtered out of the delta-only second snapshot.
    bool sawMissingUid = false;
    for (const auto &entry : *map1) {
        if (map2->find(entry.first) == map2->end()) {
            sawMissingUid = true;
            break;
        }
    }
    ASSERT_TRUE(sawMissingUid);

    // Every updated UID must exist in the first snapshot with grown counters.
    for (const auto &[uid, newTimes] : *map2) {
        ASSERT_NE(map1->find(uid), map1->end());
        ASSERT_NO_FATAL_FAILURE(TestCheckUpdate((*map1)[uid], newTimes));
    }
}
199
TEST(TimeInStateTest, TotalAndAllUidTimeInStateConsistent) {
    auto allUid = getUidsCpuFreqTimes();
    auto total = getTotalCpuFreqTimes();

    ASSERT_TRUE(allUid.has_value() && total.has_value());

    // Check the number of policies.
    // NOTE(review): this assumes UID 0 has an entry in the per-UID map.
    ASSERT_EQ(allUid->at(0).size(), total->size());

    for (uint32_t policy = 0; policy < total->size(); ++policy) {
        const std::vector<uint64_t> &totalTimes = total->at(policy);
        const uint32_t nFreqs = totalTimes.size();

        // Sum every UID's time for this policy, folding any extra frequency
        // indices into the last bucket.
        std::vector<uint64_t> perUidSum(nFreqs, 0);
        for (const auto &[uid, uidTimes] : *allUid) {
            const auto &policyTimes = uidTimes[policy];
            for (uint32_t f = 0; f < policyTimes.size(); ++f) {
                perUidSum[std::min(f, nFreqs - 1)] += policyTimes[f];
            }
        }

        // Per-UID accounting can never exceed the system-wide totals.
        for (uint32_t f = 0; f < nFreqs; ++f) {
            ASSERT_LE(perUidSum[f], totalTimes[f]);
        }
    }
}
224
TEST(TimeInStateTest, SingleAndAllUidTimeInStateConsistent) {
    uint64_t zero = 0;
    auto maps = {getUidsCpuFreqTimes(), getUidsUpdatedCpuFreqTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());
        ASSERT_FALSE(map->empty());

        for (const auto &[uid, bulkTimes] : *map) {
            // The single-UID query must agree with the bulk dump, allowing at
            // most one second of time accrued between the two reads.
            const auto singleTimes = getUidCpuFreqTimes(uid);
            ASSERT_TRUE(singleTimes.has_value());

            ASSERT_EQ(bulkTimes.size(), singleTimes->size());
            for (uint32_t policy = 0; policy < bulkTimes.size(); ++policy) {
                ASSERT_EQ(bulkTimes[policy].size(), (*singleTimes)[policy].size());
                for (uint32_t freq = 0; freq < bulkTimes[policy].size(); ++freq) {
                    ASSERT_LE((*singleTimes)[policy][freq] - bulkTimes[policy][freq],
                              NSEC_PER_SEC);
                }
            }
        }
    }
}
248
TEST(TimeInStateTest, AllUidConcurrentTimes) {
    uint64_t zero = 0;
    auto maps = {getUidsConcurrentTimes(), getUidsUpdatedConcurrentTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());
        ASSERT_FALSE(map->empty());

        // All entries must share the shape of the first one.
        const auto &reference = map->begin()->second;
        for (const auto &entry : *map) {
            const auto &times = entry.second;
            ASSERT_EQ(times.active.size(), reference.active.size());
            ASSERT_EQ(times.policy.size(), reference.policy.size());
            for (size_t policy = 0; policy < times.policy.size(); ++policy) {
                ASSERT_EQ(times.policy[policy].size(), reference.policy[policy].size());
            }
        }
    }
}
266
TEST(TimeInStateTest, AllUidUpdatedConcurrentTimes) {
    uint64_t lastUpdate = 0;
    auto map1 = getUidsUpdatedConcurrentTimes(&lastUpdate);
    ASSERT_TRUE(map1.has_value());
    ASSERT_FALSE(map1->empty());
    ASSERT_NE(lastUpdate, (uint64_t)0);

    // Sleep briefly to trigger a context switch, ensuring we see at least one update.
    struct timespec ts = {.tv_sec = 0, .tv_nsec = 1000000};
    nanosleep(&ts, nullptr);

    const uint64_t oldLastUpdate = lastUpdate;
    auto map2 = getUidsUpdatedConcurrentTimes(&lastUpdate);
    ASSERT_TRUE(map2.has_value());
    ASSERT_FALSE(map2->empty());
    ASSERT_NE(lastUpdate, oldLastUpdate);

    // Some UID should have been idle in between and thus be absent from the
    // delta-only snapshot.
    bool sawMissingUid = false;
    for (const auto &entry : *map1) {
        if (map2->find(entry.first) == map2->end()) {
            sawMissingUid = true;
            break;
        }
    }
    ASSERT_TRUE(sawMissingUid);

    // Every updated entry must be a monotonically-grown version of the old one.
    for (const auto &[uid, newTimes] : *map2) {
        ASSERT_NE(map1->find(uid), map1->end());
        ASSERT_NO_FATAL_FAILURE(TestCheckUpdate({(*map1)[uid].active}, {newTimes.active}));
        ASSERT_NO_FATAL_FAILURE(TestCheckUpdate((*map1)[uid].policy, newTimes.policy));
    }
}
301
TEST(TimeInStateTest, SingleAndAllUidConcurrentTimesConsistent) {
    uint64_t zero = 0;
    auto maps = {getUidsConcurrentTimes(), getUidsUpdatedConcurrentTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());
        for (const auto &[uid, bulkTimes] : *map) {
            // Fetch the same UID individually; counters may differ only by the
            // time accrued between the two reads (at most one second).
            const auto single = getUidConcurrentTimes(uid);
            ASSERT_TRUE(single.has_value());
            for (uint32_t i = 0; i < bulkTimes.active.size(); ++i) {
                ASSERT_LE(single->active[i] - bulkTimes.active[i], NSEC_PER_SEC);
            }
            for (uint32_t policy = 0; policy < bulkTimes.policy.size(); ++policy) {
                for (uint32_t i = 0; i < bulkTimes.policy[policy].size(); ++i) {
                    ASSERT_LE(single->policy[policy][i] - bulkTimes.policy[policy][i],
                              NSEC_PER_SEC);
                }
            }
        }
    }
}
323
TestCheckDelta(uint64_t before,uint64_t after)324 void TestCheckDelta(uint64_t before, uint64_t after) {
325 // Times should never decrease
326 ASSERT_LE(before, after);
327 // UID can't have run for more than ~1s on each CPU
328 ASSERT_LE(after - before, NSEC_PER_SEC * 2 * get_nprocs_conf());
329 }
330
TEST(TimeInStateTest, TotalTimeInStateMonotonic) {
    auto before = getTotalCpuFreqTimes();
    ASSERT_TRUE(before.has_value());
    sleep(1);
    auto after = getTotalCpuFreqTimes();
    ASSERT_TRUE(after.has_value());

    // Every per-frequency counter must have grown by a plausible amount.
    for (uint32_t policy = 0; policy < after->size(); ++policy) {
        const auto &earlier = before->at(policy);
        const auto &later = after->at(policy);
        for (uint32_t freq = 0; freq < later.size(); ++freq) {
            ASSERT_NO_FATAL_FAILURE(TestCheckDelta(earlier[freq], later[freq]));
        }
    }
}
346
TEST(TimeInStateTest, AllUidTimeInStateMonotonic) {
    auto snapshot1 = getUidsCpuFreqTimes();
    ASSERT_TRUE(snapshot1.has_value());
    sleep(1);
    auto snapshot2 = getUidsCpuFreqTimes();
    ASSERT_TRUE(snapshot2.has_value());

    for (const auto &[uid, oldTimes] : *snapshot1) {
        // Each UID from the first snapshot must still exist, with every
        // counter grown by a plausible amount.
        ASSERT_NE(snapshot2->find(uid), snapshot2->end());
        const auto &newTimes = (*snapshot2)[uid];
        for (uint32_t policy = 0; policy < oldTimes.size(); ++policy) {
            for (uint32_t freq = 0; freq < oldTimes[policy].size(); ++freq) {
                ASSERT_NO_FATAL_FAILURE(
                        TestCheckDelta(oldTimes[policy][freq], newTimes[policy][freq]));
            }
        }
    }
}
367
TEST(TimeInStateTest, AllUidConcurrentTimesMonotonic) {
    auto snapshot1 = getUidsConcurrentTimes();
    ASSERT_TRUE(snapshot1.has_value());
    ASSERT_FALSE(snapshot1->empty());
    sleep(1);
    auto snapshot2 = getUidsConcurrentTimes();
    ASSERT_TRUE(snapshot2.has_value());
    ASSERT_FALSE(snapshot2->empty());

    for (const auto &[uid, oldTimes] : *snapshot1) {
        ASSERT_NE(snapshot2->find(uid), snapshot2->end());
        const auto &newTimes = (*snapshot2)[uid];
        // Active counters must grow plausibly...
        for (uint32_t i = 0; i < oldTimes.active.size(); ++i) {
            ASSERT_NO_FATAL_FAILURE(TestCheckDelta(oldTimes.active[i], newTimes.active[i]));
        }
        // ...and so must every per-policy counter.
        for (uint32_t policy = 0; policy < oldTimes.policy.size(); ++policy) {
            for (uint32_t i = 0; i < oldTimes.policy[policy].size(); ++i) {
                ASSERT_NO_FATAL_FAILURE(
                        TestCheckDelta(oldTimes.policy[policy][i], newTimes.policy[policy][i]));
            }
        }
    }
}
395
TEST(TimeInStateTest, AllUidTimeInStateSanityCheck) {
    uint64_t zero = 0;
    auto maps = {getUidsCpuFreqTimes(), getUidsUpdatedCpuFreqTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());

        bool foundLargeValue = false;
        for (const auto &entry : *map) {
            for (const auto &policyTimes : entry.second) {
                for (uint64_t t : policyTimes) {
                    // No single counter should exceed a year of runtime.
                    ASSERT_LE(t, NSEC_PER_YEAR);
                    if (t > UINT32_MAX) foundLargeValue = true;
                }
            }
        }
        // UINT32_MAX nanoseconds is less than 5 seconds, so if every part of our pipeline is
        // using uint64_t as expected, we should have some times higher than that.
        ASSERT_TRUE(foundLargeValue);
    }
}
416
TEST(TimeInStateTest, AllUidConcurrentTimesSanityCheck) {
    uint64_t zero = 0;
    auto maps = {getUidsConcurrentTimes(), getUidsUpdatedConcurrentTimes(&zero)};
    for (const auto &concurrentMap : maps) {
        ASSERT_TRUE(concurrentMap);

        bool activeFoundLargeValue = false;
        bool policyFoundLargeValue = false;
        for (const auto &entry : *concurrentMap) {
            // Active counters must be plausible (below a year of runtime).
            for (uint64_t t : entry.second.active) {
                ASSERT_LE(t, NSEC_PER_YEAR);
                if (t > UINT32_MAX) activeFoundLargeValue = true;
            }
            // Same bound for every per-policy counter.
            for (const auto &policyTimes : entry.second.policy) {
                for (uint64_t t : policyTimes) {
                    ASSERT_LE(t, NSEC_PER_YEAR);
                    if (t > UINT32_MAX) policyFoundLargeValue = true;
                }
            }
        }
        // UINT32_MAX nanoseconds is less than 5 seconds, so if every part of our pipeline is
        // using uint64_t as expected, we should have some times higher than that.
        ASSERT_TRUE(activeFoundLargeValue);
        ASSERT_TRUE(policyFoundLargeValue);
    }
}
443
TEST(TimeInStateTest, AllUidConcurrentTimesFailsOnInvalidBucket) {
    uint32_t uid = 0;
    {
        // Find an unused UID
        auto map = getUidsConcurrentTimes();
        ASSERT_TRUE(map.has_value());
        ASSERT_FALSE(map->empty());
        for (const auto &kv : *map) uid = std::max(uid, kv.first);
        ++uid;
    }
    // Plant a map entry whose bucket index is one past the last valid bucket;
    // the reader is expected to notice the corrupt key and fail the whole dump.
    android::base::unique_fd fd{
            bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_concurrent_times_map")};
    ASSERT_GE(fd, 0);
    uint32_t nCpus = get_nprocs_conf();
    uint32_t maxBucket = (nCpus - 1) / CPUS_PER_ENTRY;
    time_key_t key = {.uid = uid, .bucket = maxBucket + 1};
    std::vector<concurrent_val_t> vals(nCpus);
    ASSERT_FALSE(writeToMapEntry(fd, &key, vals.data(), BPF_NOEXIST));
    EXPECT_FALSE(getUidsConcurrentTimes().has_value());
    // Remove the poisoned entry so later tests see a valid map again.
    ASSERT_FALSE(deleteMapEntry(fd, &key));
}
465
TEST(TimeInStateTest, AllUidTimesConsistent) {
    auto tisMap = getUidsCpuFreqTimes();
    ASSERT_TRUE(tisMap.has_value());

    auto concurrentMap = getUidsConcurrentTimes();
    ASSERT_TRUE(concurrentMap.has_value());

    // Both maps must cover the same UIDs, and each UID's two views of its CPU
    // time must agree with one another.
    ASSERT_EQ(tisMap->size(), concurrentMap->size());
    for (const auto &[uid, freqTimes] : *tisMap) {
        ASSERT_NE(concurrentMap->find(uid), concurrentMap->end());
        ASSERT_NO_FATAL_FAILURE(TestUidTimesConsistent(freqTimes, (*concurrentMap)[uid]));
    }
}
483
// Verifies that clearUidTimes() removes a UID's entries from both the
// time-in-state map and the concurrent-times map.
TEST(TimeInStateTest, RemoveUid) {
    uint32_t uid = 0;
    {
        // Find an unused UID
        auto times = getUidsCpuFreqTimes();
        ASSERT_TRUE(times.has_value());
        ASSERT_FALSE(times->empty());
        for (const auto &kv : *times) uid = std::max(uid, kv.first);
        ++uid;
    }
    {
        // Add a map entry for our fake UID by copying a real map entry
        android::base::unique_fd fd{
                bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_time_in_state_map")};
        ASSERT_GE(fd, 0);
        time_key_t k;
        ASSERT_FALSE(getFirstMapKey(fd, &k));
        std::vector<tis_val_t> vals(get_nprocs_conf());
        ASSERT_FALSE(findMapEntry(fd, &k, vals.data()));
        uint32_t copiedUid = k.uid;
        k.uid = uid;
        ASSERT_FALSE(writeToMapEntry(fd, &k, vals.data(), BPF_NOEXIST));

        // Mirror the same entry into the concurrent-times map.
        android::base::unique_fd fd2{
                bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_concurrent_times_map")};
        // Fix: the fd was previously used without checking bpf_obj_get success.
        ASSERT_GE(fd2, 0);
        k.uid = copiedUid;
        k.bucket = 0;
        std::vector<concurrent_val_t> cvals(get_nprocs_conf());
        ASSERT_FALSE(findMapEntry(fd2, &k, cvals.data()));
        k.uid = uid;
        ASSERT_FALSE(writeToMapEntry(fd2, &k, cvals.data(), BPF_NOEXIST));
    }
    auto times = getUidCpuFreqTimes(uid);
    ASSERT_TRUE(times.has_value());
    ASSERT_FALSE(times->empty());

    // Fix: query the fake UID (previously UID 0) so the pre-removal check
    // actually exercises the entry we are about to delete.
    auto concurrentTimes = getUidConcurrentTimes(uid);
    ASSERT_TRUE(concurrentTimes.has_value());
    ASSERT_FALSE(concurrentTimes->active.empty());
    ASSERT_FALSE(concurrentTimes->policy.empty());

    // The copied entries came from a live UID, so their totals must be nonzero.
    uint64_t sum = 0;
    for (size_t i = 0; i < times->size(); ++i) {
        for (auto x : (*times)[i]) sum += x;
    }
    ASSERT_GT(sum, (uint64_t)0);

    uint64_t activeSum = 0;
    for (size_t i = 0; i < concurrentTimes->active.size(); ++i) {
        activeSum += concurrentTimes->active[i];
    }
    ASSERT_GT(activeSum, (uint64_t)0);

    // Clearing the UID must remove it from both maps.
    ASSERT_TRUE(clearUidTimes(uid));

    auto allTimes = getUidsCpuFreqTimes();
    ASSERT_TRUE(allTimes.has_value());
    ASSERT_FALSE(allTimes->empty());
    ASSERT_EQ(allTimes->find(uid), allTimes->end());

    auto allConcurrentTimes = getUidsConcurrentTimes();
    ASSERT_TRUE(allConcurrentTimes.has_value());
    ASSERT_FALSE(allConcurrentTimes->empty());
    ASSERT_EQ(allConcurrentTimes->find(uid), allConcurrentTimes->end());
}
549
TEST(TimeInStateTest, GetCpuFreqs) {
    const auto freqs = getCpuFreqs();
    ASSERT_TRUE(freqs.has_value());

    const auto times = getUidCpuFreqTimes(0);
    ASSERT_TRUE(times.has_value());

    // The frequency table and the time-in-state table must have matching
    // dimensions: same policy count, same frequency count per policy.
    ASSERT_EQ(freqs->size(), times->size());
    for (size_t policy = 0; policy < freqs->size(); ++policy) {
        EXPECT_EQ((*freqs)[policy].size(), (*times)[policy].size());
    }
}
560
// Returns the current CLOCK_MONOTONIC time in nanoseconds.
uint64_t timeNanos() {
    struct timespec spec;
    clock_gettime(CLOCK_MONOTONIC, &spec);
    // Cast before multiplying: tv_sec is time_t, which may be only 32 bits
    // wide; tv_sec * 1e9 would overflow before the implicit widening to the
    // uint64_t return type.
    return static_cast<uint64_t>(spec.tv_sec) * 1000000000ull + spec.tv_nsec;
}
566
567 // Keeps CPU busy with some number crunching
// Keeps CPU busy with some number crunching
void useCpu() {
    // volatile forces the loop's loads/stores to be emitted: `sum` is
    // otherwise dead, and the compiler may delete the whole loop, defeating
    // the purpose of this busy-work. (Plain assignment instead of *= because
    // compound assignment on volatile is deprecated in C++20.)
    volatile long sum = 0;
    for (int i = 0; i < 100000; i++) {
        sum = sum * i;
    }
}
574
// Semaphores used to ping-pong CPU time between the main thread and the
// worker thread in GetAggregatedTaskCpuFreqTimes.
sem_t pingsem, pongsem;
576
testThread(void *)577 void *testThread(void *) {
578 for (int i = 0; i < 10; i++) {
579 sem_wait(&pingsem);
580 useCpu();
581 sem_post(&pongsem);
582 }
583 return nullptr;
584 }
585
// Verifies per-task CPU time aggregation: a worker thread tagged with
// aggregation key 42 and the rest of the process (key 0) must each accumulate
// nonzero CPU time bounded by the wall-clock duration of the test.
TEST(TimeInStateTest, GetAggregatedTaskCpuFreqTimes) {
    uint64_t startTimeNs = timeNanos();

    // Fix: sem_init results were previously ignored; a failure here would
    // otherwise surface as a confusing hang or crash later.
    ASSERT_EQ(sem_init(&pingsem, 0, 1), 0);
    ASSERT_EQ(sem_init(&pongsem, 0, 0), 0);

    pthread_t thread;
    ASSERT_EQ(pthread_create(&thread, NULL, &testThread, NULL), 0);

    // This process may have been running for some time, so when we start tracking
    // CPU time, the very first switch may include the accumulated time.
    // Yield the remainder of this timeslice to the newly created thread.
    sem_wait(&pongsem);
    sem_post(&pingsem);

    pid_t tgid = getpid();
    startTrackingProcessCpuTimes(tgid);

    pid_t tid = pthread_gettid_np(thread);
    startAggregatingTaskCpuTimes(tid, 42);

    // Play ping-pong with the other thread to ensure that both threads get
    // some CPU time.
    for (int i = 0; i < 9; i++) {
        sem_wait(&pongsem);
        useCpu();
        sem_post(&pingsem);
    }

    pthread_join(thread, NULL);
    // Fix: release the semaphores now that both threads are done with them.
    sem_destroy(&pingsem);
    sem_destroy(&pongsem);

    std::optional<std::unordered_map<uint16_t, std::vector<std::vector<uint64_t>>>> optionalMap =
            getAggregatedTaskCpuFreqTimes(tgid, {0, 42});
    ASSERT_TRUE(optionalMap);

    // Fix: bind by reference instead of copying the entire nested map.
    const auto &map = *optionalMap;
    // Exactly two aggregation buckets: 0 (rest of process) and 42 (worker).
    ASSERT_EQ(map.size(), 2u);

    uint64_t testDurationNs = timeNanos() - startTimeNs;
    // Fix: iterate by const reference — `for (auto pair : map)` copied every
    // key and the whole vector-of-vectors payload on each iteration.
    for (const auto &[aggregationKey, timesInState] : map) {
        ASSERT_TRUE(aggregationKey == 0 || aggregationKey == 42);

        // Each bucket must report nonzero CPU time, bounded by the
        // wall-clock duration of the test.
        uint64_t totalCpuTime = 0;
        for (const auto &policyTimes : timesInState) {
            for (uint64_t t : policyTimes) totalCpuTime += t;
        }
        ASSERT_GT(totalCpuTime, 0ul);
        ASSERT_LE(totalCpuTime, testDurationNs);
    }
}
640
641 } // namespace bpf
642 } // namespace android
643