/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17 #include <stdint.h>
18 #include <sys/mman.h>
19 #include <sys/types.h>
20 #include <unistd.h>
21
22 #include <memory>
23 #include <mutex>
24 #include <string>
25
26 #include <android-base/stringprintf.h>
27
28 #include <unwindstack/Elf.h>
29 #include <unwindstack/MapInfo.h>
30 #include <unwindstack/Maps.h>
31
32 #include "MemoryFileAtOffset.h"
33 #include "MemoryRange.h"
34
35 namespace unwindstack {
36
InitFileMemoryFromPreviousReadOnlyMap(MemoryFileAtOffset * memory)37 bool MapInfo::InitFileMemoryFromPreviousReadOnlyMap(MemoryFileAtOffset* memory) {
38 // One last attempt, see if the previous map is read-only with the
39 // same name and stretches across this map.
40 if (prev_real_map() == nullptr || prev_real_map()->flags() != PROT_READ) {
41 return false;
42 }
43
44 uint64_t map_size = end() - prev_real_map()->end();
45 if (!memory->Init(name(), prev_real_map()->offset(), map_size)) {
46 return false;
47 }
48
49 uint64_t max_size;
50 if (!Elf::GetInfo(memory, &max_size) || max_size < map_size) {
51 return false;
52 }
53
54 if (!memory->Init(name(), prev_real_map()->offset(), max_size)) {
55 return false;
56 }
57
58 set_elf_offset(offset() - prev_real_map()->offset());
59 set_elf_start_offset(prev_real_map()->offset());
60 return true;
61 }
62
// Creates a Memory object backed by the file named by this map, positioned
// over the elf data. Returns a heap-allocated object owned by the caller,
// or nullptr if no usable file-backed memory could be created.
Memory* MapInfo::GetFileMemory() {
  std::unique_ptr<MemoryFileAtOffset> memory(new MemoryFileAtOffset);
  if (offset() == 0) {
    // Simple case: the map starts at the beginning of the file, so map in
    // the whole file.
    if (memory->Init(name(), 0)) {
      return memory.release();
    }
    return nullptr;
  }

  // These are the possibilities when the offset is non-zero.
  // - There is an elf file embedded in a file, and the offset is the
  //   the start of the elf in the file.
  // - There is an elf file embedded in a file, and the offset is the
  //   the start of the executable part of the file. The actual start
  //   of the elf is in the read-only segment preceeding this map.
  // - The whole file is an elf file, and the offset needs to be saved.
  //
  // Map in just the part of the file for the map. If this is not
  // a valid elf, then reinit as if the whole file is an elf file.
  // If the offset is a valid elf, then determine the size of the map
  // and reinit to that size. This is needed because the dynamic linker
  // only maps in a portion of the original elf, and never the symbol
  // file data.
  uint64_t map_size = end() - start();
  if (!memory->Init(name(), offset(), map_size)) {
    return nullptr;
  }

  // Check if the start of this map is an embedded elf.
  uint64_t max_size = 0;
  if (Elf::GetInfo(memory.get(), &max_size)) {
    set_elf_start_offset(offset());
    if (max_size > map_size) {
      // The elf extends past this map; remap using the full elf size so
      // the data beyond the map boundary is readable.
      if (memory->Init(name(), offset(), max_size)) {
        return memory.release();
      }
      // Try to reinit using the default map_size.
      if (memory->Init(name(), offset(), map_size)) {
        return memory.release();
      }
      // Both inits failed, so roll back the start offset set above.
      set_elf_start_offset(0);
      return nullptr;
    }
    return memory.release();
  }

  // No elf at offset, try to init as if the whole file is an elf.
  if (memory->Init(name(), 0) && Elf::IsValidElf(memory.get())) {
    set_elf_offset(offset());
    // Need to check how to set the elf start offset. If this map is not
    // the r-x map of a r-- map, then use the real offset value. Otherwise,
    // use 0.
    if (prev_real_map() == nullptr || prev_real_map()->offset() != 0 ||
        prev_real_map()->flags() != PROT_READ || prev_real_map()->name() != name()) {
      set_elf_start_offset(offset());
    }
    return memory.release();
  }

  // See if the map previous to this one contains a read-only map
  // that represents the real start of the elf data.
  if (InitFileMemoryFromPreviousReadOnlyMap(memory.get())) {
    return memory.release();
  }

  // Failed to find elf at start of file or at read-only map, return
  // file object from the current map.
  if (memory->Init(name(), offset(), map_size)) {
    return memory.release();
  }
  return nullptr;
}
135
// Creates a Memory object for reading this map's elf data: first tries the
// backing file, then falls back to reading the process memory directly
// (possibly stitching two maps together). Returns a heap-allocated object
// owned by the caller, or nullptr on failure.
Memory* MapInfo::CreateMemory(const std::shared_ptr<Memory>& process_memory) {
  if (end() <= start()) {
    return nullptr;
  }

  set_elf_offset(0);

  // Fail on device maps.
  if (flags() & MAPS_FLAGS_DEVICE_MAP) {
    return nullptr;
  }

  // First try and use the file associated with the info.
  if (!name().empty()) {
    Memory* memory = GetFileMemory();
    if (memory != nullptr) {
      return memory;
    }
  }

  // No file (or file init failed); without a process memory there is
  // nothing left to read from.
  if (process_memory == nullptr) {
    return nullptr;
  }

  set_memory_backed_elf(true);

  // Need to verify that this elf is valid. It's possible that
  // only part of the elf file to be mapped into memory is in the executable
  // map. In this case, there will be another read-only map that includes the
  // first part of the elf file. This is done if the linker rosegment
  // option is used.
  std::unique_ptr<MemoryRange> memory(new MemoryRange(process_memory, start(), end() - start(), 0));
  if (Elf::IsValidElf(memory.get())) {
    set_elf_start_offset(offset());

    // Might need to peek at the next map to create a memory object that
    // includes that map too.
    if (offset() != 0 || name().empty() || next_real_map() == nullptr ||
        offset() >= next_real_map()->offset() || next_real_map()->name() != name()) {
      return memory.release();
    }

    // There is a possibility that the elf object has already been created
    // in the next map. Since this should be a very uncommon path, just
    // redo the work. If this happens, the elf for this map will eventually
    // be discarded.
    MemoryRanges* ranges = new MemoryRanges;
    ranges->Insert(new MemoryRange(process_memory, start(), end() - start(), 0));
    ranges->Insert(new MemoryRange(process_memory, next_real_map()->start(),
                                   next_real_map()->end() - next_real_map()->start(),
                                   next_real_map()->offset() - offset()));

    return ranges;
  }

  // Find the read-only map by looking at the previous map. The linker
  // doesn't guarantee that this invariant will always be true. However,
  // if that changes, there is likely something else that will change and
  // break something.
  if (offset() == 0 || name().empty() || prev_real_map() == nullptr ||
      prev_real_map()->name() != name() || prev_real_map()->offset() >= offset()) {
    set_memory_backed_elf(false);
    return nullptr;
  }

  // Make sure that relative pc values are corrected properly.
  set_elf_offset(offset() - prev_real_map()->offset());
  // Use this as the elf start offset, otherwise, you always get offsets into
  // the r-x section, which is not quite the right information.
  set_elf_start_offset(prev_real_map()->offset());

  // Stitch the previous read-only map and this map together so the full
  // elf data is readable as one range.
  MemoryRanges* ranges = new MemoryRanges;
  ranges->Insert(new MemoryRange(process_memory, prev_real_map()->start(),
                                 prev_real_map()->end() - prev_real_map()->start(), 0));
  ranges->Insert(new MemoryRange(process_memory, start(), end() - start(), elf_offset()));

  return ranges;
}
214
// Returns the Elf object for this map, creating and caching it on first
// use. Creation is serialized on elf_mutex(); an invalid Elf object is
// kept (rather than nullptr) so initialization is not retried. The returned
// pointer is owned by this MapInfo (via elf()).
Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum expected_arch) {
  {
    // Make sure no other thread is trying to add the elf to this map.
    std::lock_guard<std::mutex> guard(elf_mutex());

    if (elf().get() != nullptr) {
      return elf().get();
    }

    bool locked = false;
    if (Elf::CachingEnabled() && !name().empty()) {
      Elf::CacheLock();
      locked = true;
      if (Elf::CacheGet(this)) {
        Elf::CacheUnlock();
        return elf().get();
      }
    }

    Memory* memory = CreateMemory(process_memory);
    if (locked) {
      // Another cache entry may have satisfied this map while the memory
      // object was being created; if so, the memory is no longer needed.
      if (Elf::CacheAfterCreateMemory(this)) {
        delete memory;
        Elf::CacheUnlock();
        return elf().get();
      }
    }
    // NOTE(review): memory is not freed here — presumably the Elf object
    // takes ownership of it; confirm against the Elf constructor.
    elf().reset(new Elf(memory));
    // If the init fails, keep the elf around as an invalid object so we
    // don't try to reinit the object.
    elf()->Init();
    if (elf()->valid() && expected_arch != elf()->arch()) {
      // Make the elf invalid, mismatch between arch and expected arch.
      elf()->Invalidate();
    }

    if (locked) {
      Elf::CacheAdd(this);
      Elf::CacheUnlock();
    }
  }

  if (!elf()->valid()) {
    set_elf_start_offset(offset());
  } else if (prev_real_map() != nullptr && elf_start_offset() != offset() &&
             prev_real_map()->offset() == elf_start_offset() && prev_real_map()->name() == name()) {
    // If there is a read-only map then a read-execute map that represents the
    // same elf object, make sure the previous map is using the same elf
    // object if it hasn't already been set.
    std::lock_guard<std::mutex> guard(prev_real_map()->elf_mutex());
    if (prev_real_map()->elf().get() == nullptr) {
      prev_real_map()->set_elf(elf());
      prev_real_map()->set_memory_backed_elf(memory_backed_elf());
    } else {
      // Discard this elf, and use the elf from the previous map instead.
      set_elf(prev_real_map()->elf());
    }
  }
  return elf().get();
}
275
GetFunctionName(uint64_t addr,SharedString * name,uint64_t * func_offset)276 bool MapInfo::GetFunctionName(uint64_t addr, SharedString* name, uint64_t* func_offset) {
277 {
278 // Make sure no other thread is trying to update this elf object.
279 std::lock_guard<std::mutex> guard(elf_mutex());
280 if (elf() == nullptr) {
281 return false;
282 }
283 }
284 // No longer need the lock, once the elf object is created, it is not deleted
285 // until this object is deleted.
286 return elf()->GetFunctionName(addr, name, func_offset);
287 }
288
GetLoadBias(const std::shared_ptr<Memory> & process_memory)289 uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
290 int64_t cur_load_bias = load_bias().load();
291 if (cur_load_bias != INT64_MAX) {
292 return cur_load_bias;
293 }
294
295 {
296 // Make sure no other thread is trying to add the elf to this map.
297 std::lock_guard<std::mutex> guard(elf_mutex());
298 if (elf() != nullptr) {
299 if (elf()->valid()) {
300 cur_load_bias = elf()->GetLoadBias();
301 set_load_bias(cur_load_bias);
302 return cur_load_bias;
303 } else {
304 set_load_bias(0);
305 return 0;
306 }
307 }
308 }
309
310 // Call lightweight static function that will only read enough of the
311 // elf data to get the load bias.
312 std::unique_ptr<Memory> memory(CreateMemory(process_memory));
313 cur_load_bias = Elf::GetLoadBias(memory.get());
314 set_load_bias(cur_load_bias);
315 return cur_load_bias;
316 }
317
~MapInfo()318 MapInfo::~MapInfo() {
319 ElfFields* elf_fields = elf_fields_.load();
320 if (elf_fields != nullptr) {
321 delete elf_fields->build_id_.load();
322 delete elf_fields;
323 }
324 }
325
GetBuildID()326 SharedString MapInfo::GetBuildID() {
327 SharedString* id = build_id().load();
328 if (id != nullptr) {
329 return *id;
330 }
331
332 // No need to lock, at worst if multiple threads do this at the same
333 // time it should be detected and only one thread should win and
334 // save the data.
335
336 // Now need to see if the elf object exists.
337 // Make sure no other thread is trying to add the elf to this map.
338 elf_mutex().lock();
339 Elf* elf_obj = elf().get();
340 elf_mutex().unlock();
341 std::string result;
342 if (elf_obj != nullptr) {
343 result = elf_obj->GetBuildID();
344 } else {
345 // This will only work if we can get the file associated with this memory.
346 // If this is only available in memory, then the section name information
347 // is not present and we will not be able to find the build id info.
348 std::unique_ptr<Memory> memory(GetFileMemory());
349 if (memory != nullptr) {
350 result = Elf::GetBuildID(memory.get());
351 }
352 }
353 return SetBuildID(std::move(result));
354 }
355
SetBuildID(std::string && new_build_id)356 SharedString MapInfo::SetBuildID(std::string&& new_build_id) {
357 std::unique_ptr<SharedString> new_build_id_ptr(new SharedString(std::move(new_build_id)));
358 SharedString* expected_id = nullptr;
359 // Strong version since we need to reliably return the stored pointer.
360 if (build_id().compare_exchange_strong(expected_id, new_build_id_ptr.get())) {
361 // Value saved, so make sure the memory is not freed.
362 return *new_build_id_ptr.release();
363 } else {
364 // The expected value is set to the stored value on failure.
365 return *expected_id;
366 }
367 }
368
GetElfFields()369 MapInfo::ElfFields& MapInfo::GetElfFields() {
370 ElfFields* elf_fields = elf_fields_.load(std::memory_order_acquire);
371 if (elf_fields != nullptr) {
372 return *elf_fields;
373 }
374 // Allocate and initialize the field in thread-safe way.
375 std::unique_ptr<ElfFields> desired(new ElfFields());
376 ElfFields* expected = nullptr;
377 // Strong version is reliable. Weak version might randomly return false.
378 if (elf_fields_.compare_exchange_strong(expected, desired.get())) {
379 return *desired.release(); // Success: we transferred the pointer ownership to the field.
380 } else {
381 return *expected; // Failure: 'expected' is updated to the value set by the other thread.
382 }
383 }
384
GetPrintableBuildID()385 std::string MapInfo::GetPrintableBuildID() {
386 std::string raw_build_id = GetBuildID();
387 if (raw_build_id.empty()) {
388 return "";
389 }
390 std::string printable_build_id;
391 for (const char& c : raw_build_id) {
392 // Use %hhx to avoid sign extension on abis that have signed chars.
393 printable_build_id += android::base::StringPrintf("%02hhx", c);
394 }
395 return printable_build_id;
396 }
397
398 } // namespace unwindstack
399