1 /*
2  * Copyright (C) 2013 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <gtest/gtest.h>
18 
19 #include <elf.h>
20 #include <limits.h>
21 #include <malloc.h>
22 #include <pthread.h>
23 #include <semaphore.h>
24 #include <signal.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/auxv.h>
30 #include <sys/prctl.h>
31 #include <sys/types.h>
32 #include <sys/wait.h>
33 #include <unistd.h>
34 
35 #include <algorithm>
36 #include <atomic>
37 #include <thread>
38 #include <vector>
39 
40 #include <tinyxml2.h>
41 
42 #include <android-base/file.h>
43 
44 #include "utils.h"
45 
46 #if defined(__BIONIC__)
47 
48 #include "SignalUtils.h"
49 #include "dlext_private.h"
50 
51 #include "platform/bionic/malloc.h"
52 #include "platform/bionic/mte.h"
53 #include "platform/bionic/reserved_signals.h"
54 #include "private/bionic_config.h"
55 
56 #define HAVE_REALLOCARRAY 1
57 
58 #else
59 
60 #define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
61 
62 #endif
63 
TEST(malloc, malloc_std) {
  // A plain allocation must succeed and report at least the requested size.
  void* allocation = malloc(100);
  ASSERT_NE(nullptr, allocation);
  ASSERT_LE(100U, malloc_usable_size(allocation));
  free(allocation);
}
71 
TEST(malloc, malloc_overflow) {
  SKIP_WITH_HWASAN;
  // An impossible request must fail and report ENOMEM.
  errno = 0;
  void* result = malloc(SIZE_MAX);
  ASSERT_TRUE(result == nullptr);
  ASSERT_EQ(ENOMEM, errno);
}
78 
TEST(malloc, calloc_std) {
  // calloc must hand back zero-initialized memory of at least the
  // requested length.
  const size_t kAllocLen = 100;
  char* data = static_cast<char*>(calloc(1, kAllocLen));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(kAllocLen, malloc_usable_size(data));
  for (size_t idx = 0; idx < kAllocLen; ++idx) {
    ASSERT_EQ(0, data[idx]);
  }
  free(data);
}
90 
TEST(malloc, calloc_mem_init_disabled) {
#if defined(__BIONIC__)
  // calloc should still zero memory if mem-init is disabled.
  // With jemalloc the mallopts will fail but that shouldn't affect the
  // execution of the test.
  mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
  size_t alloc_len = 100;
  char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
  // Fix: the original dereferenced ptr without checking the allocation
  // succeeded, which would crash the test run instead of failing it.
  ASSERT_TRUE(ptr != nullptr);
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
  mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
108 
TEST(malloc, calloc_illegal) {
  SKIP_WITH_HWASAN;
  // nitems of -1 converts to SIZE_MAX; the request must fail with ENOMEM.
  errno = 0;
  void* result = calloc(-1, 100);
  ASSERT_TRUE(result == nullptr);
  ASSERT_EQ(ENOMEM, errno);
}
115 
TEST(malloc, calloc_overflow) {
  SKIP_WITH_HWASAN;
  // Every nitems/size pair below either multiplies past SIZE_MAX or asks
  // for SIZE_MAX bytes outright; each must fail with ENOMEM.
  const struct {
    size_t nitems;
    size_t size;
  } kCases[] = {
      {1, SIZE_MAX},
      {SIZE_MAX, SIZE_MAX},
      {2, SIZE_MAX},
      {SIZE_MAX, 2},
  };
  for (const auto& c : kCases) {
    errno = 0;
    ASSERT_EQ(nullptr, calloc(c.nitems, c.size));
    ASSERT_EQ(ENOMEM, errno);
  }
}
131 
TEST(malloc, memalign_multiple) {
  SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
  // Exercise every alignment value from 1 to 8191, including non powers
  // of two. Only the power-of-two floor of the alignment is verified.
  for (size_t power = 0; power <= 12; power++) {
    const size_t pow2_floor = 1U << power;
    for (size_t alignment = pow2_floor; alignment < (1U << (power + 1)); alignment++) {
      char* ptr = reinterpret_cast<char*>(memalign(alignment, 100));
      ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
      ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
      ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % pow2_floor)
          << "Failed at alignment " << alignment;
      free(ptr);
    }
  }
}
146 
TEST(malloc, memalign_overflow) {
  SKIP_WITH_HWASAN;
  // An impossibly large aligned request must fail.
  void* result = memalign(4096, SIZE_MAX);
  ASSERT_TRUE(result == nullptr);
}
151 
TEST(malloc, memalign_non_power2) {
  SKIP_WITH_HWASAN;
  // memalign accepts any alignment value here, including 0 and
  // non powers of two; each call must still produce a usable pointer.
  for (size_t align = 0; align <= 256; align++) {
    void* allocation = memalign(align, 1024);
    ASSERT_TRUE(allocation != nullptr) << "Failed at align " << align;
    free(allocation);
  }
}
161 
TEST(malloc, memalign_realloc) {
  // Memalign and then realloc the pointer a couple of times, verifying
  // after each step that the previously written bytes survived.
  for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
    char* ptr = reinterpret_cast<char*>(memalign(alignment, 100));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(100U, malloc_usable_size(ptr));
    // Fix: use uintptr_t for the modulus; the original cast to the signed
    // intptr_t, whose % result is implementation-hostile for "large" ptrs.
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % alignment);
    memset(ptr, 0x23, 100);

    // Grow to 200: the first 100 bytes must be preserved.
    ptr = reinterpret_cast<char*>(realloc(ptr, 200));
    ASSERT_TRUE(ptr != nullptr);  // Fix: removed the duplicated null assert.
    ASSERT_LE(200U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 100; i++) {
      ASSERT_EQ(0x23, ptr[i]);
    }
    memset(ptr, 0x45, 200);

    // Grow to 300: all 200 bytes must be preserved.
    ptr = reinterpret_cast<char*>(realloc(ptr, 300));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(300U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 200; i++) {
      ASSERT_EQ(0x45, ptr[i]);
    }
    memset(ptr, 0x67, 300);

    // Shrink to 250: the first 250 bytes must be preserved.
    ptr = reinterpret_cast<char*>(realloc(ptr, 250));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(250U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 250; i++) {
      ASSERT_EQ(0x67, ptr[i]);
    }
    free(ptr);
  }
}
197 
TEST(malloc, malloc_realloc_larger) {
  // Realloc to a larger size, malloc is used for the original allocation.
  char* data = static_cast<char*>(malloc(100));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(100U, malloc_usable_size(data));
  memset(data, 67, 100);

  // Growing must preserve the original 100 bytes.
  data = static_cast<char*>(realloc(data, 200));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(200U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 100; ++idx) {
    ASSERT_EQ(67, data[idx]);
  }
  free(data);
}
213 
TEST(malloc, malloc_realloc_smaller) {
  // Realloc to a smaller size, malloc is used for the original allocation.
  char* data = static_cast<char*>(malloc(200));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(200U, malloc_usable_size(data));
  memset(data, 67, 200);

  // Shrinking must preserve the leading 100 bytes.
  data = static_cast<char*>(realloc(data, 100));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(100U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 100; ++idx) {
    ASSERT_EQ(67, data[idx]);
  }
  free(data);
}
229 
TEST(malloc, malloc_multiple_realloc) {
  // Multiple reallocs, malloc is used for the original allocation.
  char* data = static_cast<char*>(malloc(200));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(200U, malloc_usable_size(data));
  memset(data, 0x23, 200);

  // Shrink to 100: the first 100 bytes must survive.
  data = static_cast<char*>(realloc(data, 100));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(100U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 100; ++idx) {
    ASSERT_EQ(0x23, data[idx]);
  }

  // Shrink again to 50.
  data = static_cast<char*>(realloc(data, 50));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(50U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 50; ++idx) {
    ASSERT_EQ(0x23, data[idx]);
  }

  // Grow to 150: only the first 50 bytes are defined at this point.
  data = static_cast<char*>(realloc(data, 150));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(150U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 50; ++idx) {
    ASSERT_EQ(0x23, data[idx]);
  }
  memset(data, 0x23, 150);

  // Grow to 425: all 150 freshly written bytes must survive.
  data = static_cast<char*>(realloc(data, 425));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(425U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 150; ++idx) {
    ASSERT_EQ(0x23, data[idx]);
  }
  free(data);
}
267 
TEST(malloc, calloc_realloc_larger) {
  // Realloc to a larger size, calloc is used for the original allocation.
  char* data = static_cast<char*>(calloc(1, 100));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(100U, malloc_usable_size(data));

  // The zeroed 100 bytes must survive the grow.
  data = static_cast<char*>(realloc(data, 200));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(200U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 100; ++idx) {
    ASSERT_EQ(0, data[idx]);
  }
  free(data);
}
282 
TEST(malloc, calloc_realloc_smaller) {
  // Realloc to a smaller size, calloc is used for the original allocation.
  char* data = static_cast<char*>(calloc(1, 200));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(200U, malloc_usable_size(data));

  // The leading 100 zeroed bytes must survive the shrink.
  data = static_cast<char*>(realloc(data, 100));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(100U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 100; ++idx) {
    ASSERT_EQ(0, data[idx]);
  }
  free(data);
}
297 
TEST(malloc, calloc_multiple_realloc) {
  // Multiple reallocs, calloc is used for the original allocation.
  char* data = static_cast<char*>(calloc(1, 200));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(200U, malloc_usable_size(data));

  // Shrink to 100: zeroes must survive.
  data = static_cast<char*>(realloc(data, 100));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(100U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 100; ++idx) {
    ASSERT_EQ(0, data[idx]);
  }

  // Shrink again to 50.
  data = static_cast<char*>(realloc(data, 50));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(50U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 50; ++idx) {
    ASSERT_EQ(0, data[idx]);
  }

  // Grow to 150: only the first 50 bytes are defined at this point.
  data = static_cast<char*>(realloc(data, 150));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(150U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 50; ++idx) {
    ASSERT_EQ(0, data[idx]);
  }
  memset(data, 0, 150);

  // Grow to 425: all 150 zeroed bytes must survive.
  data = static_cast<char*>(realloc(data, 425));
  ASSERT_NE(nullptr, data);
  ASSERT_LE(425U, malloc_usable_size(data));
  for (size_t idx = 0; idx < 150; ++idx) {
    ASSERT_EQ(0, data[idx]);
  }
  free(data);
}
334 
TEST(malloc, realloc_overflow) {
  SKIP_WITH_HWASAN;
  // Growing from nullptr straight to SIZE_MAX must fail with ENOMEM.
  errno = 0;
  ASSERT_TRUE(realloc(nullptr, SIZE_MAX) == nullptr);
  ASSERT_EQ(ENOMEM, errno);
  // An existing allocation must remain valid after a failed overflow realloc.
  void* original = malloc(100);
  ASSERT_TRUE(original != nullptr);
  errno = 0;
  ASSERT_TRUE(realloc(original, SIZE_MAX) == nullptr);
  ASSERT_EQ(ENOMEM, errno);
  free(original);
}
347 
348 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
349 extern "C" void* pvalloc(size_t);
350 extern "C" void* valloc(size_t);
351 #endif
352 
TEST(malloc, pvalloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // pvalloc page-aligns the result and rounds the size up to a whole page.
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* page = pvalloc(100);
  ASSERT_TRUE(page != nullptr);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(page) & (pagesize - 1));
  ASSERT_LE(pagesize, malloc_usable_size(page));
  free(page);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}
365 
TEST(malloc, pvalloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // Rounding SIZE_MAX up to a page multiple overflows; the call must fail.
  void* result = pvalloc(SIZE_MAX);
  ASSERT_TRUE(result == nullptr);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}
373 
TEST(malloc, valloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // valloc must return a page-aligned pointer.
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* page = valloc(100);
  ASSERT_TRUE(page != nullptr);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(page) & (pagesize - 1));
  free(page);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
385 
TEST(malloc, valloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // A SIZE_MAX request can never be satisfied.
  void* result = valloc(SIZE_MAX);
  ASSERT_TRUE(result == nullptr);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
393 
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // Dump malloc_info(3) XML into a temporary file, read it back, and
  // structurally validate the output for each known native allocator.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fp now owns the descriptor; fclose() below releases it.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // Root element is <malloc version="...">; the version attribute tells us
  // which allocator produced the dump and therefore what shape to expect.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap nr="..."> per arena, each carrying aggregate
    // counters and a series of <bin> children.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      // Siblings of the first <bin> may include non-bin elements, hence the
      // name check inside the loop.
      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size="..." count="..."> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
460 
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // Cross-check the totals reported by malloc_info(3) against
  // mallinfo().uordblks sampled immediately before and after the dump.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fp now owns the descriptor; fclose() below releases it.
  ASSERT_TRUE(fp != nullptr);
  // Bracket the dump with mallinfo samples: malloc_info itself allocates,
  // so the XML totals should land between these two snapshots.
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // Sum allocated-large + allocated-huge + allocated-bins over all arenas.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      // bins-total is validated for presence but not added to the total.
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    // Sum size * count over every <alloc> element.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
528 
TEST(malloc, calloc_usable_size) {
  // Verify calloc zeroes the whole *usable* allocation, not just the
  // requested size, by dirtying memory likely to be handed back again.
  for (size_t size = 1; size <= 2048; size++) {
    void* pointer = malloc(size);
    ASSERT_TRUE(pointer != nullptr);
    memset(pointer, 0xeb, malloc_usable_size(pointer));
    free(pointer);

    // We should get a previous pointer that has been set to non-zero.
    // If calloc does not zero out all of the data, this will fail.
    uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
    // Fix: the original asserted on the stale, already-freed `pointer`
    // instead of the fresh `zero_mem` allocation.
    ASSERT_TRUE(zero_mem != nullptr);
    size_t usable_size = malloc_usable_size(zero_mem);
    for (size_t i = 0; i < usable_size; i++) {
      ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
    }
    free(zero_mem);
  }
}
547 
TEST(malloc, malloc_0) {
  // A zero-byte request must still yield a unique, freeable pointer.
  void* allocation = malloc(0);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
553 
TEST(malloc, calloc_0_0) {
  // Zero items of zero bytes must still yield a freeable pointer.
  void* allocation = calloc(0, 0);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
559 
TEST(malloc, calloc_0_1) {
  // Zero items of one byte must still yield a freeable pointer.
  void* allocation = calloc(0, 1);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
565 
TEST(malloc, calloc_1_0) {
  // One item of zero bytes must still yield a freeable pointer.
  void* allocation = calloc(1, 0);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
571 
TEST(malloc, realloc_nullptr_0) {
  // realloc(nullptr, size) is actually malloc(size).
  void* allocation = realloc(nullptr, 0);
  ASSERT_NE(nullptr, allocation);
  free(allocation);
}
578 
TEST(malloc, realloc_0) {
  void* allocation = malloc(1024);
  ASSERT_NE(nullptr, allocation);
  // realloc(p, 0) is actually free(p), so nullptr comes back.
  void* shrunk = realloc(allocation, 0);
  ASSERT_EQ(nullptr, shrunk);
}
586 
// Number of allocation rounds per data type below.
constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    // A 1-byte filler allocation is interleaved before each real allocation
    // to perturb the allocator's placement, then freed right after.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    // Write and read back through the pointer to prove it is usable.
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  // All value allocations are kept live until here so that freeing one does
  // not let the allocator hand the same block back in a later iteration.
  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
656 
TEST(malloc, mallopt_smoke) {
  errno = 0;
  int result = mallopt(-1000, 1);
  // An unrecognized option fails, and mallopt doesn't set errno.
  ASSERT_EQ(0, result);
  ASSERT_EQ(0, errno);
}
663 
TEST(malloc, mallopt_decay) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  // Toggle the decay timer on and off twice; every call must succeed.
  errno = 0;
  for (int enable : {1, 0, 1, 0}) {
    ASSERT_EQ(1, mallopt(M_DECAY_TIME, enable));
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
676 
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  // A purge request must report success.
  errno = 0;
  int purge_result = mallopt(M_PURGE, 0);
  ASSERT_EQ(1, purge_result);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
686 
#if defined(__BIONIC__)
// Detects which native allocator is in use by dumping malloc_info(3) XML
// and inspecting the root element's version attribute. Sets
// *allocator_scudo to true iff the version string is "scudo-1". Uses
// gtest ASSERTs internally, so callers should check HasFatalFailure (or
// rely on the output being false) if a step fails.
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fp now owns the descriptor; fclose() below releases it.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif
710 
TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  // Each of these scudo-specific options must be accepted.
  const struct {
    int option;
    int value;
  } kScudoOptions[] = {
      {M_CACHE_COUNT_MAX, 100},
      {M_CACHE_SIZE_MAX, 1024 * 1024 * 2},
      {M_TSDS_COUNT_MAX, 8},
  };
  for (const auto& opt : kScudoOptions) {
    ASSERT_EQ(1, mallopt(opt.option, opt.value));
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
726 
TEST(malloc, reallocarray_overflow) {
#if HAVE_REALLOCARRAY
  // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
  size_t a = static_cast<size_t>(INTPTR_MIN + 4);
  size_t b = 2;

  // Try both argument orders; each must fail with ENOMEM rather than
  // wrapping around to a tiny allocation.
  for (int swapped = 0; swapped < 2; ++swapped) {
    errno = 0;
    void* result = swapped ? reallocarray(nullptr, b, a) : reallocarray(nullptr, a, b);
    ASSERT_TRUE(result == nullptr);
    ASSERT_EQ(ENOMEM, errno);
  }
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
744 
TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  // reallocarray(nullptr, nmemb, size) behaves like malloc(nmemb * size).
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
  // Fix: the original leaked the allocation.
  free(p);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
754 
TEST(malloc, mallinfo) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  // Allocation sizes spanning small bins through very large mappings.
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  // Upper bound on retries per size before declaring failure.
  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      // Sample uordblks (total allocated bytes) around each allocation.
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Free every slot; unattempted slots are nullptr, which free() ignores.
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
799 
// Asserts that |floating| satisfies the natural alignment of Type
// (alignof(Type)). Marked optnone so the compiler cannot fold or
// statically prove the check — the allocator's actual runtime pointer
// value is what gets tested.
template <typename Type>
void __attribute__((optnone)) VerifyAlignment(Type* floating) {
  size_t expected_alignment = alignof(Type);
  if (expected_alignment != 0) {
    ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
        << "Expected alignment " << expected_alignment << " ptr value " << floating;
  }
}
808 
// Allocates Type objects via operator new, malloc, and std::vector in turn,
// verifying the natural alignment of every resulting pointer with
// VerifyAlignment. optnone keeps the allocations and checks from being
// optimized away. Returns early (leaking the remaining pointers in the
// current batch) once a fatal gtest failure has been recorded.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    // VerifyAlignment's ASSERT fires in its own stack frame, so the fatal
    // failure has to be re-checked here to stop the test.
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
851 
#if defined(__ANDROID__)
// Allocates |alloc_size| bytes 100 times and asserts each returned pointer
// has at least |aligned_bytes| alignment. |aligned_bytes| must be a power
// of two since it is converted to a mask. optnone keeps the allocations
// from being elided. NOTE(review): the allocations are never freed —
// presumably so the allocator cannot keep recycling one well-aligned
// block; confirm this is intentional.
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
}
#endif
865 
// Verifies that malloc/new return memory suitably aligned for every
// fundamental type, and (on Android) that the bionic-specific 8/16-byte
// size-based alignment guarantees hold.
TEST(malloc, align_check) {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // - Allocations of a size that rounds up to a multiple of 16 bytes
  //   must have at least 16 byte alignment.
  // - Allocations of a size that rounds up to a multiple of 8 bytes and
  //   not 16 bytes, are only required to have at least 8 byte alignment.
  // This is regardless of whether it is in a 32 bit or 64 bit environment.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  for (size_t i = 1; i <= 128; i++) {
    // Round the request up to the next multiple of 8 to decide which
    // guarantee (8 or 16 bytes) applies.
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
    // AndroidVerifyAlignment asserts in its own frame; propagate fatal
    // failures by bailing out of the test here.
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
912 
913 // Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc,DISABLED_alloc_after_fork)914 TEST(malloc, DISABLED_alloc_after_fork) {
915   // Both of these need to be a power of 2.
916   static constexpr size_t kMinAllocationSize = 8;
917   static constexpr size_t kMaxAllocationSize = 2097152;
918 
919   static constexpr size_t kNumAllocatingThreads = 5;
920   static constexpr size_t kNumForkLoops = 100;
921 
922   std::atomic_bool stop;
923 
924   // Create threads that simply allocate and free different sizes.
925   std::vector<std::thread*> threads;
926   for (size_t i = 0; i < kNumAllocatingThreads; i++) {
927     std::thread* t = new std::thread([&stop] {
928       while (!stop) {
929         for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
930           void* ptr;
931           DoNotOptimize(ptr = malloc(size));
932           free(ptr);
933         }
934       }
935     });
936     threads.push_back(t);
937   }
938 
939   // Create a thread to fork and allocate.
940   for (size_t i = 0; i < kNumForkLoops; i++) {
941     pid_t pid;
942     if ((pid = fork()) == 0) {
943       for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
944         void* ptr;
945         DoNotOptimize(ptr = malloc(size));
946         ASSERT_TRUE(ptr != nullptr);
947         // Make sure we can touch all of the allocation.
948         memset(ptr, 0x1, size);
949         ASSERT_LE(size, malloc_usable_size(ptr));
950         free(ptr);
951       }
952       _exit(10);
953     }
954     ASSERT_NE(-1, pid);
955     AssertChildExited(pid, 10);
956   }
957 
958   stop = true;
959   for (auto thread : threads) {
960     thread->join();
961     delete thread;
962   }
963 }
964 
TEST(android_mallopt, error_on_unexpected_option) {
#if defined(__BIONIC__)
  // An option value bionic does not recognize must fail with ENOTSUP.
  errno = 0;
  EXPECT_FALSE(android_mallopt(-1 /* unrecognized option */, nullptr, 0));
  EXPECT_EQ(ENOTSUP, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
975 
IsDynamic()976 bool IsDynamic() {
977 #if defined(__LP64__)
978   Elf64_Ehdr ehdr;
979 #else
980   Elf32_Ehdr ehdr;
981 #endif
982   std::string path(android::base::GetExecutablePath());
983 
984   int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
985   if (fd == -1) {
986     // Assume dynamic on error.
987     return true;
988   }
989   bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
990   close(fd);
991   // Assume dynamic in error cases.
992   return !read_completed || ehdr.e_type == ET_DYN;
993 }
994 
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // A well-formed call (no argument) succeeds in dynamic executables and is
  // reported as unsupported in static ones.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(0, errno);
  } else {
    // Not supported in static executables.
    EXPECT_FALSE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(ENOTSUP, errno);
  }

  // Passing any argument must be rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_FALSE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  // Static executables report ENOTSUP before argument validation.
  EXPECT_EQ(IsDynamic() ? EINVAL : ENOTSUP, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1021 
#if defined(__BIONIC__)
// Runs in a forked child (via EXPECT_EXIT): installs a 128MB allocation
// limit, then checks that `func` can allocate below the limit but not at
// it. Exits 0 on success, 1 on any unexpected result.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t allocation_limit = 128 * 1024 * 1024;
  ASSERT_TRUE(
      android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &allocation_limit, sizeof(allocation_limit)));
  // A 20MB request must still succeed under the limit...
  if (!func(20 * 1024 * 1024)) exit(1);
  // ...but a request as large as the limit itself must be rejected.
  if (func(128 * 1024 * 1024)) exit(1);
  exit(0);
}
#endif
1035 
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  // Every allocation entry point must honor the limit installed by
  // CheckAllocationFunction (each check runs in its own forked child).
  EXPECT_EXIT(CheckAllocationFunction([](size_t n) { return calloc(n, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t n) { return calloc(1, n) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t n) { return malloc(n) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(
      CheckAllocationFunction([](size_t n) { return memalign(sizeof(void*), n) != nullptr; }),
      testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t n) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void*), n) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(
      CheckAllocationFunction([](size_t n) { return aligned_alloc(sizeof(void*), n) != nullptr; }),
      testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t n) {
                void* p = malloc(1024 * 1024);
                return realloc(p, n) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc/valloc are only present on 32 bit platforms.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t n) { return pvalloc(n) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t n) { return valloc(n) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1070 
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // The allocation limit can only be installed once; later attempts to
  // change it must be rejected.
  size_t first_limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &first_limit, sizeof(first_limit)));
  size_t second_limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &second_limit, sizeof(second_limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1082 
1083 #if defined(__BIONIC__)
1084 static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
1085 
GetMaxAllocations()1086 static size_t GetMaxAllocations() {
1087   size_t max_pointers = 0;
1088   void* ptrs[20];
1089   for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
1090     ptrs[i] = malloc(kAllocationSize);
1091     if (ptrs[i] == nullptr) {
1092       max_pointers = i;
1093       break;
1094     }
1095   }
1096   for (size_t i = 0; i < max_pointers; i++) {
1097     free(ptrs[i]);
1098   }
1099   return max_pointers;
1100 }
1101 
VerifyMaxPointers(size_t max_pointers)1102 static void VerifyMaxPointers(size_t max_pointers) {
1103   // Now verify that we can allocate the same number as before.
1104   void* ptrs[20];
1105   for (size_t i = 0; i < max_pointers; i++) {
1106     ptrs[i] = malloc(kAllocationSize);
1107     ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1108   }
1109 
1110   // Make sure the next allocation still fails.
1111   ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1112   for (size_t i = 0; i < max_pointers; i++) {
1113     free(ptrs[i]);
1114   }
1115 }
1116 #endif
1117 
TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* ptr = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(ptr != nullptr);

  // Growing the allocation in steps must succeed while under the limit.
  for (size_t mb : {20, 40, 60, 80}) {
    ptr = realloc(ptr, mb * 1024 * 1024);
    ASSERT_TRUE(ptr != nullptr);
  }
  // Now push past limit.
  ptr = realloc(ptr, 130 * 1024 * 1024);
  ASSERT_TRUE(ptr == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1147 
TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* ptr = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(ptr != nullptr);

  // Shrinking the allocation step by step must always succeed.
  for (size_t mb : {60, 40, 20, 10}) {
    ptr = realloc(ptr, mb * 1024 * 1024);
    ASSERT_TRUE(ptr != nullptr);
  }
  free(ptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1175 
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* ptr = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(ptr != nullptr);

  // realloc to size 0 is expected to act as a free and return nullptr;
  // VerifyMaxPointers below confirms the bytes were given back.
  ptr = realloc(ptr, 0);
  ASSERT_TRUE(ptr == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1195 
1196 #if defined(__BIONIC__)
SetAllocationLimit(void * data)1197 static void* SetAllocationLimit(void* data) {
1198   std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
1199   while (!go->load()) {
1200   }
1201   size_t limit = 500 * 1024 * 1024;
1202   if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
1203     return reinterpret_cast<void*>(-1);
1204   }
1205   return nullptr;
1206 }
1207 
SetAllocationLimitMultipleThreads()1208 static void SetAllocationLimitMultipleThreads() {
1209   std::atomic_bool go;
1210   go = false;
1211 
1212   static constexpr size_t kNumThreads = 4;
1213   pthread_t threads[kNumThreads];
1214   for (size_t i = 0; i < kNumThreads; i++) {
1215     ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
1216   }
1217 
1218   // Let them go all at once.
1219   go = true;
1220   // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
1221   // heapprofd handler.
1222   union sigval signal_value;
1223   signal_value.sival_int = 0;
1224   ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));
1225 
1226   size_t num_successful = 0;
1227   for (size_t i = 0; i < kNumThreads; i++) {
1228     void* result;
1229     ASSERT_EQ(0, pthread_join(threads[i], &result));
1230     if (result != nullptr) {
1231       num_successful++;
1232     }
1233   }
1234   ASSERT_EQ(1U, num_successful);
1235   exit(0);
1236 }
1237 #endif
1238 
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because error messages are not displayed.
    pid_t pid = fork();
    ASSERT_NE(-1, pid);
    if (pid == 0) {
      // Child: the helper exits the process itself on success.
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1261 
TestHeapZeroing(int num_iterations,int (* get_alloc_size)(int iteration))1262 void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1263   std::vector<void*> allocs;
1264   constexpr int kMaxBytesToCheckZero = 64;
1265   const char kBlankMemory[kMaxBytesToCheckZero] = {};
1266 
1267   for (int i = 0; i < num_iterations; ++i) {
1268     int size = get_alloc_size(i);
1269     allocs.push_back(malloc(size));
1270     memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1271   }
1272 
1273   for (void* alloc : allocs) {
1274     free(alloc);
1275   }
1276   allocs.clear();
1277 
1278   for (int i = 0; i < num_iterations; ++i) {
1279     int size = get_alloc_size(i);
1280     allocs.push_back(malloc(size));
1281     ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1282   }
1283 
1284   for (void* alloc : allocs) {
1285     free(alloc);
1286   }
1287 }
1288 
TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000,
                  [](int i) -> int { return 1 + i % 32; });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1000);
  TestHeapZeroing(/* num_iterations */ 32,
                  [](int i) -> int { return 1 << (19 + i % 4); });
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1320 
// Note that MTE is enabled on cc_tests on devices that support MTE.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  // Start a thread that blocks on the semaphore, then reports its tagged
  // address control settings once it is woken.
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     // Smuggle the prctl result back through the thread's
                     // void* return value.
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  // Disable heap tagging *before* waking the thread, so the thread samples
  // its settings only after the change has been requested.
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  // This thread's tag-check-fault mode should now be NONE...
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(PR_MTE_TCF_NONE, my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // ...and the already-running thread should observe the same settings.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1355 
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the
  // end of a large allocation.
  android_set_application_target_sdk_version(29);
  constexpr size_t kAllocSize = 131072;
  auto allocation = std::make_unique<char[]>(kAllocSize);
  volatile char* bytes = allocation.get();
  // Deliberate one-past-the-end read; must not crash at target SDK 29.
  volatile char oob ATTRIBUTE_UNUSED = bytes[kAllocSize];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1374 
1375 // Regression test for b/206701345 -- scudo bug, MTE only.
1376 // Fix: https://reviews.llvm.org/D105261
1377 // Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
TEST(malloc,realloc_mte_crash_b206701345)1378 TEST(malloc, realloc_mte_crash_b206701345) {
1379   // We want to hit in-place realloc at the very end of an mmap-ed region.  Not
1380   // all size classes allow such placement - mmap size has to be divisible by
1381   // the block size. At the time of writing this could only be reproduced with
1382   // 64 byte size class (i.e. 48 byte allocations), but that may change in the
1383   // future. Try several different classes at the lower end.
1384   std::vector<void*> ptrs(10000);
1385   for (int i = 1; i < 32; ++i) {
1386     size_t sz = 16 * i - 1;
1387     for (void*& p : ptrs) {
1388       p = realloc(malloc(sz), sz + 1);
1389     }
1390 
1391     for (void* p : ptrs) {
1392       free(p);
1393     }
1394   }
1395 }
1396