/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "snapuserd.h"

#include <csignal>
#include <optional>
#include <set>

#include <snapuserd/snapuserd_client.h>

namespace android {
namespace snapshot {

using namespace android;
using namespace android::dm;
using android::base::unique_fd;

#define SNAP_LOG(level) LOG(level) << misc_name_ << ": "
#define SNAP_PLOG(level) PLOG(level) << misc_name_ << ": "

/*
 * Merging a copy operation involves the following flow:
 *
 * 1: dm-snapshot layer requests merge for a 4k block. dm-user sends the request
 *    to the daemon
 * 2: daemon reads the source block
 * 3: daemon copies the source data
 * 4: IO completion sent back to dm-user (a switch from user space to kernel)
 * 5: dm-snapshot merges the data to the base device
 * 6: dm-snapshot sends the merge-completion IO to dm-user
 * 7: dm-user re-directs the merge completion IO to daemon (one more switch)
 * 8: daemon updates the COW file to record the completed merge request (a write
 *    syscall) followed by an fsync
 * 9: IO completion is sent back to dm-user
 *
 * The above sequence is a significant overhead especially when merging one 4k
 * block at a time.
 *
 * The read-ahead layer optimizes the above path by reading the data from the base
 * device in the background so that the merging thread can retrieve the data from
 * the read-ahead cache. Additionally, syncing of merged data is deferred to the
 * read-ahead thread, thereby keeping the IO path from being bottlenecked.
 *
 * We create a scratch space of 2MB to store the read-ahead data in the COW
 * device.
 *
 *      +-----------------------+
 *      |     Header (fixed)    |
 *      +-----------------------+
 *      |    Scratch space      |  <-- 2MB
 *      +-----------------------+
 *
 *      Scratch space is as follows:
 *
 *      +-----------------------+
 *      |       Metadata        | <- 4k page
 *      +-----------------------+
 *      |       Metadata        | <- 4k page
 *      +-----------------------+
 *      |                       |
 *      |    Read-ahead data    |
 *      |                       |
 *      +-----------------------+
 *
 * State transitions and communication between read-ahead thread and worker
 * thread during merge:
 * =====================================================================
 *
 *   Worker Threads                                 Read-Ahead thread
 *   ------------------------------------------------------------------
 *
 *      |
 *      |
 *  --> -----------------READ_AHEAD_BEGIN------------->|
 *  |   |                                              | READ_AHEAD_IN_PROGRESS
 *  |  WAIT                                            |
 *  |   |                                              |
 *  |   |<-----------------IO_IN_PROGRESS---------------
 *  |   |                                              |
 *  |   | IO_IN_PROGRESS                              WAIT
 *  |   |                                              |
 *  |<--|                                              |
 *      |                                              |
 *      ------------------IO_TERMINATED--------------->|
 *                                                     END
 *
 *
 * ===================================================================
 *
 * Example:
 *
 * We have 6 copy operations to be executed in the OTA and there is an overlap.
 * Update-engine will write to the COW file as follows:
 *
 * Op-1: 20 -> 23
 * Op-2: 19 -> 22
 * Op-3: 18 -> 21
 * Op-4: 17 -> 20
 * Op-5: 16 -> 19
 * Op-6: 15 -> 18
 *
 * Read-ahead thread will read all the 6 source blocks and store the data in the
 * scratch space. Metadata will contain the destination block numbers. Thus,
 * scratch space will look something like this:
 *
 * +--------------+
 * | Block   23   |
 * | offset - 1   |
 * +--------------+
 * | Block   22   |
 * | offset - 2   |
 * +--------------+
 * | Block   21   |
 * | offset - 3   |
 * +--------------+
 *    ...
 *    ...
 * +--------------+
 * | Data-Block 20| <-- offset - 1
 * +--------------+
 * | Data-Block 19| <-- offset - 2
 * +--------------+
 * | Data-Block 18| <-- offset - 3
 * +--------------+
 *     ...
 *     ...
 *
 * ====================================================================
 * IO Path:
 *
 * Read-ahead will serve the data to worker threads during merge only
 * after metadata and data are persisted to the scratch space. Worker
 * threads during merge will always retrieve the data from the cache; if the
 * cache is not populated, they will wait for the read-ahead thread to finish.
 * Furthermore, the number of operations merged will be synced to the header
 * only when all the blocks in the read-ahead cache are merged. In the above
 * case, when all 6 operations are merged, the COW header is updated with
 * num_merge_ops = 6.
 *
 * Merge resume after crash:
 *
 * Let's say we have a crash after 5 operations are merged, i.e. after
 * Op-5: 16->19 is completed but before Op-6 is merged. The COW header
 * num_merge_ops will still be 0 since not all the ops in the batch were
 * merged yet. During the next reboot, the read-ahead thread will re-construct
 * the data in memory from the scratch space; when merge resumes, Op-1 will be
 * re-executed. However, data will be served safely from the read-ahead cache
 * even though block 20 was over-written by Op-4.
 *
 */

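/*
 * Illustrative sketch only (not part of the implementation): given the layout
 * above, the persisted scratch metadata can be walked to rebuild the
 * block -> read-ahead-buffer mapping. This mirrors what ReconstructDataFromCow()
 * below does; `metadata_base` and `data_start` are stand-ins for the metadata
 * region base address and the buffer data offset.
 *
 *   loff_t meta_offset = 0;
 *   while (true) {
 *       auto* bm = reinterpret_cast<ScratchMetadata*>(metadata_base + meta_offset);
 *       if (bm->new_block == 0 && bm->file_offset == 0) {
 *           break;  // a zero-filled entry terminates the metadata list
 *       }
 *       // Data for destination block bm->new_block lives at
 *       // (bm->file_offset - data_start) bytes into the read-ahead data region.
 *       meta_offset += sizeof(ScratchMetadata);
 *   }
 */
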
ReadAheadThread::ReadAheadThread(const std::string& cow_device, const std::string& backing_device,
                                 const std::string& misc_name,
                                 std::shared_ptr<Snapuserd> snapuserd) {
    cow_device_ = cow_device;
    backing_store_device_ = backing_device;
    misc_name_ = misc_name;
    snapuserd_ = snapuserd;
}

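// Track whether the ops in the current read-ahead batch overlap, i.e. whether a
// block consumed by one op is produced by another op in the same batch. When an
// overlap is found, the cached data is flushed to the scratch space before worker
// threads are allowed to merge (see ReadAheadIOCompleted(overlap_) below), so the
// data remains recoverable once the base device starts getting over-written.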
void ReadAheadThread::CheckOverlap(const CowOperation* cow_op) {
    uint64_t source_block = cow_op->source;
    uint64_t source_offset = 0;
    if (dest_blocks_.count(cow_op->new_block) || source_blocks_.count(source_block) ||
        (source_offset > 0 && source_blocks_.count(source_block + 1))) {
        overlap_ = true;
    }

    dest_blocks_.insert(source_block);
    if (source_offset > 0) {
        dest_blocks_.insert(source_block + 1);
    }
    source_blocks_.insert(cow_op->new_block);
}

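// Build the next batch of read-ahead work: walk the op iterator and collect up to
// *pending_ops operations whose source blocks are contiguous on the base device
// (the run is discovered working backwards, one block lower each step). On return,
// *source_offset holds the highest source offset in the run and `blocks` holds the
// destination block numbers in visit order; the caller derives the start of the
// contiguous region from these and issues a single read for the whole run.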
void ReadAheadThread::PrepareReadAhead(uint64_t* source_offset, int* pending_ops,
                                       std::vector<uint64_t>& blocks) {
    int num_ops = *pending_ops;
    int nr_consecutive = 0;
    CHECK_NE(source_offset, nullptr);

    if (!RAIterDone() && num_ops) {
        // Get the first block with offset
        const CowOperation* cow_op = GetRAOpIter();
        CHECK_NE(cow_op, nullptr);
        *source_offset = cow_op->source;
        if (cow_op->type == kCowCopyOp) {
            *source_offset *= BLOCK_SZ;
        }
        RAIterNext();
        num_ops -= 1;
        nr_consecutive = 1;
        blocks.push_back(cow_op->new_block);

        if (!overlap_) {
            CheckOverlap(cow_op);
        }

        /*
         * Find number of consecutive blocks working backwards.
         */
        while (!RAIterDone() && num_ops) {
            const CowOperation* op = GetRAOpIter();
            CHECK_NE(op, nullptr);
            uint64_t next_offset = op->source;
            if (op->type == kCowCopyOp) {
                next_offset *= BLOCK_SZ;
            }
            if (next_offset + nr_consecutive * BLOCK_SZ != *source_offset) {
                break;
            }
            nr_consecutive += 1;
            num_ops -= 1;
            blocks.push_back(op->new_block);
            RAIterNext();

            if (!overlap_) {
                CheckOverlap(op);
            }
        }
    }
}

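// Rebuild the in-memory block -> read-ahead-buffer mapping from the scratch-space
// metadata that was persisted before a crash, then verify that every read-ahead op
// still pending merge is covered by the reconstructed mapping. Only reached on the
// first read-ahead pass after a crash during merge.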
bool ReadAheadThread::ReconstructDataFromCow() {
    std::unordered_map<uint64_t, void*>& read_ahead_buffer_map = snapuserd_->GetReadAheadMap();
    read_ahead_buffer_map.clear();
    loff_t metadata_offset = 0;
    loff_t start_data_offset = snapuserd_->GetBufferDataOffset();
    int num_ops = 0;
    int total_blocks_merged = 0;

    // This memcpy is important as metadata_buffer_ will be an unaligned address and will fault
    // on 32-bit systems
    std::unique_ptr<uint8_t[]> metadata_buffer =
            std::make_unique<uint8_t[]>(snapuserd_->GetBufferMetadataSize());
    memcpy(metadata_buffer.get(), metadata_buffer_, snapuserd_->GetBufferMetadataSize());

    while (true) {
        struct ScratchMetadata* bm = reinterpret_cast<struct ScratchMetadata*>(
                (char*)metadata_buffer.get() + metadata_offset);

        // Done reading metadata
        if (bm->new_block == 0 && bm->file_offset == 0) {
            break;
        }

        loff_t buffer_offset = bm->file_offset - start_data_offset;
        void* bufptr = static_cast<void*>((char*)read_ahead_buffer_ + buffer_offset);
        read_ahead_buffer_map[bm->new_block] = bufptr;
        num_ops += 1;
        total_blocks_merged += 1;

        metadata_offset += sizeof(struct ScratchMetadata);
    }

    // We are done re-constructing the mapping; however, we need to make sure
    // all the COW operations to-be merged are present in the re-constructed
    // mapping.
    while (!RAIterDone()) {
        const CowOperation* op = GetRAOpIter();
        if (read_ahead_buffer_map.find(op->new_block) != read_ahead_buffer_map.end()) {
            num_ops -= 1;
            snapuserd_->SetFinalBlockMerged(op->new_block);
            RAIterNext();
        } else {
            // Verify that we have covered all the ops which were re-constructed
            // from COW device - These are the ops which are being
            // re-constructed after crash.
            if (num_ops != 0) {
                SNAP_LOG(ERROR) << "ReconstructDataFromCow failed. Not all ops recovered."
                                << " Pending ops: " << num_ops;
                snapuserd_->ReadAheadIOFailed();
                return false;
            }
            break;
        }
    }

    snapuserd_->SetTotalRaBlocksMerged(total_blocks_merged);

    snapuserd_->ReconstructDataFromCowFinish();

    if (!snapuserd_->ReadAheadIOCompleted(true)) {
        SNAP_LOG(ERROR) << "ReadAheadIOCompleted failed...";
        snapuserd_->ReadAheadIOFailed();
        return false;
    }

    SNAP_LOG(INFO) << "ReconstructDataFromCow success";
    return true;
}

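// One read-ahead pass: batch contiguous copy operations, read their source data
// from the backing store in large IOs, populate the read-ahead cache and the
// scratch-space metadata, and then publish the region via ReadAheadIOCompleted()
// (flushing it to the COW device first when the batch contains overlapping blocks).
// On the first pass after a crash this instead reconstructs the cache from the
// scratch space.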
bool ReadAheadThread::ReadAheadIOStart() {
    // Check if the data has to be constructed from the COW file.
    // This will be true only once during boot up after a crash
    // during merge.
    if (snapuserd_->ReconstructDataFromCow()) {
        return ReconstructDataFromCow();
    }

    std::unordered_map<uint64_t, void*>& read_ahead_buffer_map = snapuserd_->GetReadAheadMap();
    read_ahead_buffer_map.clear();

    int num_ops = (snapuserd_->GetBufferDataSize()) / BLOCK_SZ;
    loff_t metadata_offset = 0;

    struct ScratchMetadata* bm =
            reinterpret_cast<struct ScratchMetadata*>((char*)metadata_buffer_ + metadata_offset);

    bm->new_block = 0;
    bm->file_offset = 0;

    std::vector<uint64_t> blocks;

    loff_t buffer_offset = 0;
    loff_t offset = 0;
    loff_t file_offset = snapuserd_->GetBufferDataOffset();
    int total_blocks_merged = 0;
    overlap_ = false;
    dest_blocks_.clear();
    source_blocks_.clear();

    while (true) {
        uint64_t source_offset;
        int linear_blocks;

        PrepareReadAhead(&source_offset, &num_ops, blocks);
        linear_blocks = blocks.size();
        if (linear_blocks == 0) {
            // No more blocks to read
            SNAP_LOG(DEBUG) << " Read-ahead completed....";
            break;
        }

        // Get the first block in the consecutive set of blocks
        source_offset = source_offset - (linear_blocks - 1) * BLOCK_SZ;
        size_t io_size = (linear_blocks * BLOCK_SZ);
        num_ops -= linear_blocks;
        total_blocks_merged += linear_blocks;

        // Mark the block number as the one which will be the final block to
        // be merged in this entire region. Read-ahead thread will get notified
        // when this block is merged to make forward progress.
        snapuserd_->SetFinalBlockMerged(blocks.back());

        while (linear_blocks) {
            uint64_t new_block = blocks.back();
            blocks.pop_back();
            // Assign the mapping
            void* bufptr = static_cast<void*>((char*)read_ahead_buffer_ + offset);
            read_ahead_buffer_map[new_block] = bufptr;
            offset += BLOCK_SZ;

            bm = reinterpret_cast<struct ScratchMetadata*>((char*)metadata_buffer_ +
                                                           metadata_offset);
            bm->new_block = new_block;
            bm->file_offset = file_offset;

            metadata_offset += sizeof(struct ScratchMetadata);
            file_offset += BLOCK_SZ;

            linear_blocks -= 1;
        }

        // Read from the base device consecutive set of blocks in one shot
        if (!android::base::ReadFullyAtOffset(backing_store_fd_,
                                              (char*)read_ahead_buffer_ + buffer_offset, io_size,
                                              source_offset)) {
            SNAP_PLOG(ERROR) << "Ordered-op failed. Read from backing store: "
                             << backing_store_device_ << " at block: " << source_offset / BLOCK_SZ
                             << " offset: " << source_offset % BLOCK_SZ
                             << " buffer_offset: " << buffer_offset << " io_size: " << io_size
                             << " buf-addr: " << read_ahead_buffer_;

            snapuserd_->ReadAheadIOFailed();
            return false;
        }

        // This is important - explicitly set the contents to zero. This is used
        // when re-constructing the data after crash. This indicates end of
        // reading metadata contents when re-constructing the data
        bm = reinterpret_cast<struct ScratchMetadata*>((char*)metadata_buffer_ + metadata_offset);
        bm->new_block = 0;
        bm->file_offset = 0;

        buffer_offset += io_size;
    }

    snapuserd_->SetTotalRaBlocksMerged(total_blocks_merged);

    // Flush the data only if we have overlapping blocks in the region
    if (!snapuserd_->ReadAheadIOCompleted(overlap_)) {
        SNAP_LOG(ERROR) << "ReadAheadIOCompleted failed...";
        snapuserd_->ReadAheadIOFailed();
        return false;
    }

    return true;
}

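// Thread entry point: open the devices, set up the op iterator and the scratch-space
// buffers, then alternate between filling the read-ahead cache and waiting for worker
// threads to merge it, committing the number of merged operations to the COW header
// after each region.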
bool ReadAheadThread::RunThread() {
    if (!InitializeFds()) {
        return false;
    }

    InitializeRAIter();
    InitializeBuffer();

    while (!RAIterDone()) {
        if (!ReadAheadIOStart()) {
            return false;
        }

        bool status = snapuserd_->WaitForMergeToComplete();

        if (status && !snapuserd_->CommitMerge(snapuserd_->GetTotalRaBlocksMerged())) {
            return false;
        }

        if (!status) break;
    }

    CloseFds();
    SNAP_LOG(INFO) << " ReadAhead thread terminating....";
    return true;
}

// Initialization
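// Open the backing (base) device read-only and the COW device read-write.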
bool ReadAheadThread::InitializeFds() {
    backing_store_fd_.reset(open(backing_store_device_.c_str(), O_RDONLY));
    if (backing_store_fd_ < 0) {
        SNAP_PLOG(ERROR) << "Open Failed: " << backing_store_device_;
        return false;
    }

    cow_fd_.reset(open(cow_device_.c_str(), O_RDWR));
    if (cow_fd_ < 0) {
        SNAP_PLOG(ERROR) << "Open Failed: " << cow_device_;
        return false;
    }

    return true;
}

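// The read-ahead ops are visited through a reverse iterator over snapuserd_'s
// read-ahead ops vector; the helpers below wrap that iterator.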
void ReadAheadThread::InitializeRAIter() {
    std::vector<const CowOperation*>& read_ahead_ops = snapuserd_->GetReadAheadOpsVec();
    read_ahead_iter_ = read_ahead_ops.rbegin();
}

bool ReadAheadThread::RAIterDone() {
    std::vector<const CowOperation*>& read_ahead_ops = snapuserd_->GetReadAheadOpsVec();
    return read_ahead_iter_ == read_ahead_ops.rend();
}

void ReadAheadThread::RAIterNext() {
    read_ahead_iter_++;
}

const CowOperation* ReadAheadThread::GetRAOpIter() {
    return *read_ahead_iter_;
}

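// Point metadata_buffer_ and read_ahead_buffer_ into the mmap'ed scratch space:
// the metadata region comes first, followed by the read-ahead data region (see
// the layout at the top of this file).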
void ReadAheadThread::InitializeBuffer() {
    void* mapped_addr = snapuserd_->GetMappedAddr();
    // Map the scratch space region into memory
    metadata_buffer_ =
            static_cast<void*>((char*)mapped_addr + snapuserd_->GetBufferMetadataOffset());
    read_ahead_buffer_ = static_cast<void*>((char*)mapped_addr + snapuserd_->GetBufferDataOffset());
}

}  // namespace snapshot
}  // namespace android