/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>
#include <sys/mman.h>

#include <gtest/gtest.h>

#include <ion/ion.h>

#include "ion_test_fixture.h"

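// Exit reuses the IonTest fixture, which is assumed to provide ionfd (an open
// fd for the ION device) and ion_heaps (the heaps reported by the kernel);
// every test below relies on both.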
class Exit : public IonTest {};

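// Allocate a buffer fd from every heap at several sizes inside a child
// process, then let the child exit while the fd is still open; the child must
// terminate cleanly with exit code 0.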
TEST_F(Exit, WithAllocFd) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT(
                    {
                        int handle_fd = -1;

                        ASSERT_EQ(0,
                                  ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &handle_fd));
                        ASSERT_NE(-1, handle_fd);
                        exit(0);
                    },
                    ::testing::ExitedWithCode(0), "");
        }
    }
}

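// Same as WithAllocFd, but repeated 1024 times per heap/size combination to
// stress allocation and process-exit cleanup; ASSERT_EXIT stops on the first
// failure and the message reports the failing heap, size, and iteration.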
TEST_F(Exit, WithRepeatedAllocFd) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
            for (unsigned int i = 0; i < 1024; i++) {
                SCOPED_TRACE(::testing::Message()
                             << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
                SCOPED_TRACE(::testing::Message() << "size " << size);
                ASSERT_EXIT(
                        {
                            int handle_fd = -1;

                            ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0,
                                                      &handle_fd));
                            ASSERT_NE(-1, handle_fd);
                            exit(0);
                        },
                        ::testing::ExitedWithCode(0), "")
                        << "failed on heap " << heap.name << ":" << heap.type << ":" << heap.heap_id
                        << " and size " << size << " on iteration " << i;
            }
        }
    }
}

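// Allocate from every heap, mmap the buffer inside the child, and exit with
// the mapping still in place; the child must still exit with code 0.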
TEST_F(Exit, WithMapping) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT(
                    {
                        int map_fd = -1;

                        ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &map_fd));
                        ASSERT_GE(map_fd, 0);

                        // mmap reports failure with MAP_FAILED, not NULL.
                        void* ptr;
                        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                        ASSERT_TRUE(ptr != MAP_FAILED);
                        exit(0);
                    },
                    ::testing::ExitedWithCode(0), "");
        }
    }
}

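// As WithMapping, but unmap only the first half of the buffer before exiting,
// leaving a partial mapping behind when the child terminates.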
TEST_F(Exit, WithPartialMapping) {
    static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT(
                    {
                        int map_fd = -1;

                        ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &map_fd));
                        ASSERT_GE(map_fd, 0);

                        void* ptr;
                        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                        ASSERT_TRUE(ptr != MAP_FAILED);

                        ASSERT_EQ(0, munmap(ptr, size / 2));
                        exit(0);
                    },
                    ::testing::ExitedWithCode(0), "");
        }
    }
}

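// Same as WithMapping, but the buffer is allocated with ION_FLAG_CACHED.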
TEST_F(Exit, WithMappingCached) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT(
                    {
                        int map_fd = -1;

                        ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
                                                  ION_FLAG_CACHED, &map_fd));
                        ASSERT_GE(map_fd, 0);

                        void* ptr;
                        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                        ASSERT_TRUE(ptr != MAP_FAILED);
                        exit(0);
                    },
                    ::testing::ExitedWithCode(0), "");
        }
    }
}

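// Same as WithPartialMapping, but the buffer is allocated with ION_FLAG_CACHED.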
TEST_F(Exit, WithPartialMappingCached) {
    static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT(
                    {
                        int map_fd = -1;

                        ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
                                                  ION_FLAG_CACHED, &map_fd));
                        ASSERT_GE(map_fd, 0);

                        void* ptr;
                        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                        ASSERT_TRUE(ptr != MAP_FAILED);

                        ASSERT_EQ(0, munmap(ptr, size / 2));
                        exit(0);
                    },
                    ::testing::ExitedWithCode(0), "");
        }
    }
}

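// Same as WithMapping, but the buffer is allocated with
// ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC.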
TEST_F(Exit, WithMappingNeedsSync) {
    static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT(
                    {
                        int map_fd = -1;

                        ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
                                                  ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC,
                                                  &map_fd));
                        ASSERT_GE(map_fd, 0);

                        void* ptr;
                        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                        ASSERT_TRUE(ptr != MAP_FAILED);
                        exit(0);
                    },
                    ::testing::ExitedWithCode(0), "");
        }
    }
}

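// Same as WithPartialMapping, but the buffer is allocated with
// ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC.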
TEST_F(Exit, WithPartialMappingNeedsSync) {
    static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message()
                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT(
                    {
                        int map_fd = -1;

                        ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
                                                  ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC,
                                                  &map_fd));
                        ASSERT_GE(map_fd, 0);

                        void* ptr;
                        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                        ASSERT_TRUE(ptr != MAP_FAILED);

                        ASSERT_EQ(0, munmap(ptr, size / 2));
                        exit(0);
                    },
                    ::testing::ExitedWithCode(0), "");
        }
    }
}