Merge "Add the missing sr-Latn into png files and rename the png locale header"
diff --git a/applypatch/applypatch.cpp b/applypatch/applypatch.cpp
index 5006631..51bf393 100644
--- a/applypatch/applypatch.cpp
+++ b/applypatch/applypatch.cpp
@@ -27,11 +27,13 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <functional>
 #include <memory>
 #include <string>
 #include <utility>
 #include <vector>
 
+#include <android-base/logging.h>
 #include <android-base/parseint.h>
 #include <android-base/strings.h>
 #include <openssl/sha.h>
@@ -41,16 +43,10 @@
 #include "print_sha1.h"
 
 static int LoadPartitionContents(const std::string& filename, FileContents* file);
-static ssize_t FileSink(const unsigned char* data, ssize_t len, void* token);
-static int GenerateTarget(FileContents* source_file,
-                          const Value* source_patch_value,
-                          FileContents* copy_file,
-                          const Value* copy_patch_value,
-                          const char* source_filename,
-                          const char* target_filename,
-                          const uint8_t target_sha1[SHA_DIGEST_LENGTH],
-                          size_t target_size,
-                          const Value* bonus_data);
+static size_t FileSink(const unsigned char* data, size_t len, int fd);
+static int GenerateTarget(const FileContents& source_file, const std::unique_ptr<Value>& patch,
+                          const std::string& target_filename,
+                          const uint8_t target_sha1[SHA_DIGEST_LENGTH], const Value* bonus_data);
 
 // Read a file into memory; store the file contents and associated metadata in *file.
 // Return 0 on success.
@@ -190,7 +186,6 @@
   return 0;
 }
 
-
 // Save the contents of the given FileContents object under the given
 // filename.  Return 0 on success.
 int SaveFileContents(const char* filename, const FileContents* file) {
@@ -200,8 +195,8 @@
     return -1;
   }
 
-  ssize_t bytes_written = FileSink(file->data.data(), file->data.size(), &fd);
-  if (bytes_written != static_cast<ssize_t>(file->data.size())) {
+  size_t bytes_written = FileSink(file->data.data(), file->data.size(), fd);
+  if (bytes_written != file->data.size()) {
     printf("short write of \"%s\" (%zd bytes of %zu): %s\n", filename, bytes_written,
            file->data.size(), strerror(errno));
     return -1;
@@ -439,25 +434,17 @@
     return 0;
 }
 
-ssize_t FileSink(const unsigned char* data, ssize_t len, void* token) {
-    int fd = *static_cast<int*>(token);
-    ssize_t done = 0;
-    ssize_t wrote;
-    while (done < len) {
-        wrote = TEMP_FAILURE_RETRY(ota_write(fd, data+done, len-done));
-        if (wrote == -1) {
-            printf("error writing %zd bytes: %s\n", (len-done), strerror(errno));
-            return done;
-        }
-        done += wrote;
+static size_t FileSink(const unsigned char* data, size_t len, int fd) {
+  size_t done = 0;
+  while (done < len) {
+    ssize_t wrote = TEMP_FAILURE_RETRY(ota_write(fd, data + done, len - done));
+    if (wrote == -1) {
+      printf("error writing %zd bytes: %s\n", (len - done), strerror(errno));
+      return done;
     }
-    return done;
-}
-
-ssize_t MemorySink(const unsigned char* data, ssize_t len, void* token) {
-    std::string* s = static_cast<std::string*>(token);
-    s->append(reinterpret_cast<const char*>(data), len);
-    return len;
+    done += wrote;
+  }
+  return done;
 }
 
 // Return the amount of free space (in bytes) on the filesystem
@@ -480,108 +467,90 @@
     }
 }
 
-// This function applies binary patches to files in a way that is safe
-// (the original file is not touched until we have the desired
-// replacement for it) and idempotent (it's okay to run this program
-// multiple times).
+// This function applies binary patches to EMMC target files in a way that is safe (the original
+// file is not touched until we have the desired replacement for it) and idempotent (it's okay to
+// run this program multiple times).
 //
-// - if the sha1 hash of <target_filename> is <target_sha1_string>,
-//   does nothing and exits successfully.
+// - If the SHA-1 hash of <target_filename> is <target_sha1_string>, does nothing and exits
+//   successfully.
 //
-// - otherwise, if the sha1 hash of <source_filename> is one of the
-//   entries in <patch_sha1_str>, the corresponding patch from
-//   <patch_data> (which must be a VAL_BLOB) is applied to produce a
-//   new file (the type of patch is automatically detected from the
-//   blob data).  If that new file has sha1 hash <target_sha1_str>,
-//   moves it to replace <target_filename>, and exits successfully.
-//   Note that if <source_filename> and <target_filename> are not the
-//   same, <source_filename> is NOT deleted on success.
-//   <target_filename> may be the string "-" to mean "the same as
-//   source_filename".
+// - Otherwise, if the SHA-1 hash of <source_filename> is one of the entries in <patch_sha1_str>,
+//   the corresponding patch from <patch_data> (which must be a VAL_BLOB) is applied to produce a
+//   new file (the type of patch is automatically detected from the blob data). If that new file
+//   has SHA-1 hash <target_sha1_str>, moves it to replace <target_filename>, and exits
+//   successfully. Note that if <source_filename> and <target_filename> are not the same,
+//   <source_filename> is NOT deleted on success. <target_filename> may be the string "-" to mean
+//   "the same as <source_filename>".
 //
-// - otherwise, or if any error is encountered, exits with non-zero
-//   status.
+// - Otherwise, or if any error is encountered, exits with non-zero status.
 //
-// <source_filename> may refer to a partition to read the source data.
-// See the comments for the LoadPartitionContents() function above
-// for the format of such a filename.
-
-int applypatch(const char* source_filename,
-               const char* target_filename,
-               const char* target_sha1_str,
-               size_t target_size,
+// <source_filename> must refer to an EMMC partition to read the source data. See the comments for
+// the LoadPartitionContents() function above for the format of such a filename. <target_size> has
+// become obsolete since we have dropped the support for patching non-EMMC targets (EMMC targets
+// have the size embedded in the filename).
+int applypatch(const char* source_filename, const char* target_filename,
+               const char* target_sha1_str, size_t target_size __unused,
                const std::vector<std::string>& patch_sha1_str,
-               const std::vector<std::unique_ptr<Value>>& patch_data,
-               const Value* bonus_data) {
-    printf("patch %s: ", source_filename);
+               const std::vector<std::unique_ptr<Value>>& patch_data, const Value* bonus_data) {
+  printf("patch %s: ", source_filename);
 
-    if (target_filename[0] == '-' && target_filename[1] == '\0') {
-        target_filename = source_filename;
+  if (target_filename[0] == '-' && target_filename[1] == '\0') {
+    target_filename = source_filename;
+  }
+
+  if (strncmp(target_filename, "EMMC:", 5) != 0) {
+    printf("Supporting patching EMMC targets only.\n");
+    return 1;
+  }
+
+  uint8_t target_sha1[SHA_DIGEST_LENGTH];
+  if (ParseSha1(target_sha1_str, target_sha1) != 0) {
+    printf("failed to parse tgt-sha1 \"%s\"\n", target_sha1_str);
+    return 1;
+  }
+
+  // We try to load the target file into the source_file object.
+  FileContents source_file;
+  if (LoadFileContents(target_filename, &source_file) == 0) {
+    if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_LENGTH) == 0) {
+      // The early-exit case: the patch was already applied, this file has the desired hash, nothing
+      // for us to do.
+      printf("already %s\n", short_sha1(target_sha1).c_str());
+      return 0;
     }
+  }
 
-    uint8_t target_sha1[SHA_DIGEST_LENGTH];
-    if (ParseSha1(target_sha1_str, target_sha1) != 0) {
-        printf("failed to parse tgt-sha1 \"%s\"\n", target_sha1_str);
-        return 1;
+  if (source_file.data.empty() ||
+      (target_filename != source_filename && strcmp(target_filename, source_filename) != 0)) {
+    // Need to load the source file: either we failed to load the target file, or we did but it's
+    // different from the expected target.
+    source_file.data.clear();
+    LoadFileContents(source_filename, &source_file);
+  }
+
+  if (!source_file.data.empty()) {
+    int to_use = FindMatchingPatch(source_file.sha1, patch_sha1_str);
+    if (to_use != -1) {
+      return GenerateTarget(source_file, patch_data[to_use], target_filename, target_sha1,
+                            bonus_data);
     }
+  }
 
-    FileContents source_file;
-    const Value* source_patch_value = nullptr;
+  printf("source file is bad; trying copy\n");
 
-    // We try to load the target file into the source_file object.
-    if (LoadFileContents(target_filename, &source_file) == 0) {
-        if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_LENGTH) == 0) {
-            // The early-exit case:  the patch was already applied, this file
-            // has the desired hash, nothing for us to do.
-            printf("already %s\n", short_sha1(target_sha1).c_str());
-            return 0;
-        }
-    }
+  FileContents copy_file;
+  if (LoadFileContents(CACHE_TEMP_SOURCE, &copy_file) < 0) {
+    printf("failed to read copy file\n");
+    return 1;
+  }
 
-    if (source_file.data.empty() ||
-        (target_filename != source_filename &&
-         strcmp(target_filename, source_filename) != 0)) {
-        // Need to load the source file:  either we failed to load the
-        // target file, or we did but it's different from the source file.
-        source_file.data.clear();
-        LoadFileContents(source_filename, &source_file);
-    }
+  int to_use = FindMatchingPatch(copy_file.sha1, patch_sha1_str);
+  if (to_use == -1) {
+    printf("copy file doesn't match source SHA-1s either\n");
+    return 1;
+  }
 
-    if (!source_file.data.empty()) {
-        int to_use = FindMatchingPatch(source_file.sha1, patch_sha1_str);
-        if (to_use >= 0) {
-            source_patch_value = patch_data[to_use].get();
-        }
-    }
-
-    FileContents copy_file;
-    const Value* copy_patch_value = nullptr;
-    if (source_patch_value == nullptr) {
-        source_file.data.clear();
-        printf("source file is bad; trying copy\n");
-
-        if (LoadFileContents(CACHE_TEMP_SOURCE, &copy_file) < 0) {
-            // fail.
-            printf("failed to read copy file\n");
-            return 1;
-        }
-
-        int to_use = FindMatchingPatch(copy_file.sha1, patch_sha1_str);
-        if (to_use >= 0) {
-            copy_patch_value = patch_data[to_use].get();
-        }
-
-        if (copy_patch_value == nullptr) {
-            // fail.
-            printf("copy file doesn't match source SHA-1s either\n");
-            return 1;
-        }
-    }
-
-    return GenerateTarget(&source_file, source_patch_value,
-                          &copy_file, copy_patch_value,
-                          source_filename, target_filename,
-                          target_sha1, target_size, bonus_data);
+  return GenerateTarget(copy_file, patch_data[to_use], target_filename, target_sha1, bonus_data);
 }
 
 /*
@@ -638,34 +607,9 @@
   return 0;
 }
 
-static int GenerateTarget(FileContents* source_file,
-                          const Value* source_patch_value,
-                          FileContents* copy_file,
-                          const Value* copy_patch_value,
-                          const char* source_filename,
-                          const char* target_filename,
-                          const uint8_t target_sha1[SHA_DIGEST_LENGTH],
-                          size_t target_size,
-                          const Value* bonus_data) {
-  // assume that target_filename (eg "/system/app/Foo.apk") is located
-  // on the same filesystem as its top-level directory ("/system").
-  // We need something that exists for calling statfs().
-  std::string target_fs = target_filename;
-  auto slash_pos = target_fs.find('/', 1);
-  if (slash_pos != std::string::npos) {
-    target_fs.resize(slash_pos);
-  }
-
-  FileContents* source_to_use;
-  const Value* patch;
-  if (source_patch_value != nullptr) {
-    source_to_use = source_file;
-    patch = source_patch_value;
-  } else {
-    source_to_use = copy_file;
-    patch = copy_patch_value;
-  }
-
+static int GenerateTarget(const FileContents& source_file, const std::unique_ptr<Value>& patch,
+                          const std::string& target_filename,
+                          const uint8_t target_sha1[SHA_DIGEST_LENGTH], const Value* bonus_data) {
   if (patch->type != VAL_BLOB) {
     printf("patch is not a blob\n");
     return 1;
@@ -683,137 +627,41 @@
     return 1;
   }
 
-  bool target_is_partition = (strncmp(target_filename, "EMMC:", 5) == 0);
-  const std::string tmp_target_filename = std::string(target_filename) + ".patch";
+  CHECK(android::base::StartsWith(target_filename, "EMMC:"));
 
-  int retry = 1;
-  bool made_copy = false;
-  SHA_CTX ctx;
+  // We still write the original source to cache, in case the partition write is interrupted.
+  if (MakeFreeSpaceOnCache(source_file.data.size()) < 0) {
+    printf("not enough free space on /cache\n");
+    return 1;
+  }
+  if (SaveFileContents(CACHE_TEMP_SOURCE, &source_file) < 0) {
+    printf("failed to back up source file\n");
+    return 1;
+  }
+
+  // We store the decoded output in memory.
   std::string memory_sink_str;  // Don't need to reserve space.
-  do {
-    // Is there enough room in the target filesystem to hold the patched file?
+  SinkFn sink = [&memory_sink_str](const unsigned char* data, size_t len) {
+    memory_sink_str.append(reinterpret_cast<const char*>(data), len);
+    return len;
+  };
 
-    if (target_is_partition) {
-      // If the target is a partition, we're actually going to
-      // write the output to /tmp and then copy it to the
-      // partition.  statfs() always returns 0 blocks free for
-      // /tmp, so instead we'll just assume that /tmp has enough
-      // space to hold the file.
+  SHA_CTX ctx;
+  SHA1_Init(&ctx);
 
-      // We still write the original source to cache, in case
-      // the partition write is interrupted.
-      if (MakeFreeSpaceOnCache(source_file->data.size()) < 0) {
-        printf("not enough free space on /cache\n");
-        return 1;
-      }
-      if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) {
-        printf("failed to back up source file\n");
-        return 1;
-      }
-      made_copy = true;
-      retry = 0;
-    } else {
-      bool enough_space = false;
-      if (retry > 0) {
-        size_t free_space = FreeSpaceForFile(target_fs.c_str());
-        enough_space = (free_space > (256 << 10)) &&          // 256k (two-block) minimum
-                       (free_space > (target_size * 3 / 2));  // 50% margin of error
-        if (!enough_space) {
-          printf("target %zu bytes; free space %zu bytes; retry %d; enough %d\n", target_size,
-                 free_space, retry, enough_space);
-        }
-      }
+  int result;
+  if (use_bsdiff) {
+    result = ApplyBSDiffPatch(source_file.data.data(), source_file.data.size(), patch.get(), 0,
+                              sink, &ctx);
+  } else {
+    result = ApplyImagePatch(source_file.data.data(), source_file.data.size(), patch.get(), sink,
+                             &ctx, bonus_data);
+  }
 
-      if (!enough_space) {
-        retry = 0;
-      }
-
-      if (!enough_space && source_patch_value != nullptr) {
-        // Using the original source, but not enough free space.  First
-        // copy the source file to cache, then delete it from the original
-        // location.
-
-        if (strncmp(source_filename, "EMMC:", 5) == 0) {
-          // It's impossible to free space on the target filesystem by
-          // deleting the source if the source is a partition.  If
-          // we're ever in a state where we need to do this, fail.
-          printf("not enough free space for target but source is partition\n");
-          return 1;
-        }
-
-        if (MakeFreeSpaceOnCache(source_file->data.size()) < 0) {
-          printf("not enough free space on /cache\n");
-          return 1;
-        }
-
-        if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) {
-          printf("failed to back up source file\n");
-          return 1;
-        }
-        made_copy = true;
-        unlink(source_filename);
-
-        size_t free_space = FreeSpaceForFile(target_fs.c_str());
-        printf("(now %zu bytes free for target) ", free_space);
-      }
-    }
-
-    SinkFn sink = nullptr;
-    void* token = nullptr;
-    unique_fd output_fd;
-    if (target_is_partition) {
-      // We store the decoded output in memory.
-      sink = MemorySink;
-      token = &memory_sink_str;
-    } else {
-      // We write the decoded output to "<tgt-file>.patch".
-      output_fd.reset(ota_open(tmp_target_filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_SYNC,
-                               S_IRUSR | S_IWUSR));
-      if (output_fd == -1) {
-        printf("failed to open output file %s: %s\n", tmp_target_filename.c_str(), strerror(errno));
-        return 1;
-      }
-      sink = FileSink;
-      token = &output_fd;
-    }
-
-    SHA1_Init(&ctx);
-
-    int result;
-    if (use_bsdiff) {
-      result = ApplyBSDiffPatch(source_to_use->data.data(), source_to_use->data.size(), patch, 0,
-                                sink, token, &ctx);
-    } else {
-      result = ApplyImagePatch(source_to_use->data.data(), source_to_use->data.size(), patch, sink,
-                               token, &ctx, bonus_data);
-    }
-
-    if (!target_is_partition) {
-      if (ota_fsync(output_fd) != 0) {
-        printf("failed to fsync file \"%s\": %s\n", tmp_target_filename.c_str(), strerror(errno));
-        result = 1;
-      }
-      if (ota_close(output_fd) != 0) {
-        printf("failed to close file \"%s\": %s\n", tmp_target_filename.c_str(), strerror(errno));
-        result = 1;
-      }
-    }
-
-    if (result != 0) {
-      if (retry == 0) {
-        printf("applying patch failed\n");
-        return 1;
-      } else {
-        printf("applying patch failed; retrying\n");
-      }
-      if (!target_is_partition) {
-        unlink(tmp_target_filename.c_str());
-      }
-    } else {
-      // succeeded; no need to retry
-      break;
-    }
-  } while (retry-- > 0);
+  if (result != 0) {
+    printf("applying patch failed\n");
+    return 1;
+  }
 
   uint8_t current_target_sha1[SHA_DIGEST_LENGTH];
   SHA1_Final(current_target_sha1, &ctx);
@@ -824,36 +672,15 @@
     printf("now %s\n", short_sha1(target_sha1).c_str());
   }
 
-  if (target_is_partition) {
-    // Copy the temp file to the partition.
-    if (WriteToPartition(reinterpret_cast<const unsigned char*>(memory_sink_str.c_str()),
-                         memory_sink_str.size(), target_filename) != 0) {
-      printf("write of patched data to %s failed\n", target_filename);
-      return 1;
-    }
-  } else {
-    // Give the .patch file the same owner, group, and mode of the original source file.
-    if (chmod(tmp_target_filename.c_str(), source_to_use->st.st_mode) != 0) {
-      printf("chmod of \"%s\" failed: %s\n", tmp_target_filename.c_str(), strerror(errno));
-      return 1;
-    }
-    if (chown(tmp_target_filename.c_str(), source_to_use->st.st_uid,
-              source_to_use->st.st_gid) != 0) {
-      printf("chown of \"%s\" failed: %s\n", tmp_target_filename.c_str(), strerror(errno));
-      return 1;
-    }
-
-    // Finally, rename the .patch file to replace the target file.
-    if (rename(tmp_target_filename.c_str(), target_filename) != 0) {
-      printf("rename of .patch to \"%s\" failed: %s\n", target_filename, strerror(errno));
-      return 1;
-    }
+  // Write the patched data to the partition.
+  if (WriteToPartition(reinterpret_cast<const unsigned char*>(memory_sink_str.c_str()),
+                       memory_sink_str.size(), target_filename) != 0) {
+    printf("write of patched data to %s failed\n", target_filename.c_str());
+    return 1;
   }
 
-  // If this run of applypatch created the copy, and we're here, we can delete it.
-  if (made_copy) {
-    unlink(CACHE_TEMP_SOURCE);
-  }
+  // Delete the backup copy of the source.
+  unlink(CACHE_TEMP_SOURCE);
 
   // Success!
   return 0;
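
The EMMC-only contract described in the applypatch() comment above can be illustrated with a small caller sketch. This is not part of the change; the partition paths, sizes and SHA-1 strings are hypothetical placeholders, and only the signature shown in this diff is assumed.

// Hedged sketch: invoking the EMMC-only applypatch(). Every "EMMC:<...>" string
// below is a placeholder, not a value from this commit.
#include <memory>
#include <string>
#include <vector>

#include "applypatch/applypatch.h"

int PatchRecoveryFromBoot(const std::vector<std::unique_ptr<Value>>& patches,
                          const Value* bonus_data) {
  const char* source = "EMMC:<boot-device>:<size>:<sha1>";      // placeholder
  const char* target = "EMMC:<recovery-device>:<size>:<sha1>";  // placeholder
  std::vector<std::string> patch_sha1s{ "<boot-sha1>" };        // one entry per patch
  // target_size is now unused; EMMC targets embed the size in the filename.
  return applypatch(source, target, "<recovery-sha1>", 0, patch_sha1s, patches, bonus_data);
}
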
diff --git a/applypatch/bspatch.cpp b/applypatch/bspatch.cpp
index 9920c2b..f75a2c6 100644
--- a/applypatch/bspatch.cpp
+++ b/applypatch/bspatch.cpp
@@ -24,9 +24,9 @@
 #include <sys/types.h>
 
 #include <bspatch.h>
+#include <openssl/sha.h>
 
 #include "applypatch/applypatch.h"
-#include "openssl/sha.h"
 
 void ShowBSDiffLicense() {
     puts("The bsdiff library used herein is:\n"
@@ -60,10 +60,10 @@
         );
 }
 
-int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size, const Value* patch,
-                     ssize_t patch_offset, SinkFn sink, void* token, SHA_CTX* ctx) {
-  auto sha_sink = [&](const uint8_t* data, size_t len) {
-    len = sink(data, len, token);
+int ApplyBSDiffPatch(const unsigned char* old_data, size_t old_size, const Value* patch,
+                     size_t patch_offset, SinkFn sink, SHA_CTX* ctx) {
+  auto sha_sink = [&sink, &ctx](const uint8_t* data, size_t len) {
+    len = sink(data, len);
     if (ctx) SHA1_Update(ctx, data, len);
     return len;
   };
@@ -72,8 +72,8 @@
                          patch->data.size(), sha_sink);
 }
 
-int ApplyBSDiffPatchMem(const unsigned char* old_data, ssize_t old_size, const Value* patch,
-                        ssize_t patch_offset, std::vector<unsigned char>* new_data) {
+int ApplyBSDiffPatchMem(const unsigned char* old_data, size_t old_size, const Value* patch,
+                        size_t patch_offset, std::vector<unsigned char>* new_data) {
   auto vector_sink = [new_data](const uint8_t* data, size_t len) {
     new_data->insert(new_data->end(), data, data + len);
     return len;
diff --git a/applypatch/imgpatch.cpp b/applypatch/imgpatch.cpp
index adcc61f..7d8b736 100644
--- a/applypatch/imgpatch.cpp
+++ b/applypatch/imgpatch.cpp
@@ -43,12 +43,11 @@
   return android::base::get_unaligned<int32_t>(address);
 }
 
-int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
-                    const unsigned char* patch_data, ssize_t patch_size,
-                    SinkFn sink, void* token) {
+int ApplyImagePatch(const unsigned char* old_data, size_t old_size, const unsigned char* patch_data,
+                    size_t patch_size, SinkFn sink) {
   Value patch(VAL_BLOB, std::string(reinterpret_cast<const char*>(patch_data), patch_size));
 
-  return ApplyImagePatch(old_data, old_size, &patch, sink, token, nullptr, nullptr);
+  return ApplyImagePatch(old_data, old_size, &patch, sink, nullptr, nullptr);
 }
 
 /*
@@ -57,8 +56,8 @@
  * file, and update the SHA context with the output data as well.
  * Return 0 on success.
  */
-int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size, const Value* patch,
-                    SinkFn sink, void* token, SHA_CTX* ctx, const Value* bonus_data) {
+int ApplyImagePatch(const unsigned char* old_data, size_t old_size, const Value* patch, SinkFn sink,
+                    SHA_CTX* ctx, const Value* bonus_data) {
   if (patch->data.size() < 12) {
     printf("patch too short to contain header\n");
     return -1;
@@ -97,11 +96,11 @@
       size_t src_len = static_cast<size_t>(Read8(normal_header + 8));
       size_t patch_offset = static_cast<size_t>(Read8(normal_header + 16));
 
-      if (src_start + src_len > static_cast<size_t>(old_size)) {
+      if (src_start + src_len > old_size) {
         printf("source data too short\n");
         return -1;
       }
-      ApplyBSDiffPatch(old_data + src_start, src_len, patch, patch_offset, sink, token, ctx);
+      ApplyBSDiffPatch(old_data + src_start, src_len, patch, patch_offset, sink, ctx);
     } else if (type == CHUNK_RAW) {
       const char* raw_header = &patch->data[pos];
       pos += 4;
@@ -110,15 +109,14 @@
         return -1;
       }
 
-      ssize_t data_len = Read4(raw_header);
+      size_t data_len = static_cast<size_t>(Read4(raw_header));
 
       if (pos + data_len > patch->data.size()) {
         printf("failed to read chunk %d raw data\n", i);
         return -1;
       }
       if (ctx) SHA1_Update(ctx, &patch->data[pos], data_len);
-      if (sink(reinterpret_cast<const unsigned char*>(&patch->data[pos]), data_len, token) !=
-          data_len) {
+      if (sink(reinterpret_cast<const unsigned char*>(&patch->data[pos]), data_len) != data_len) {
         printf("failed to write chunk %d raw data\n", i);
         return -1;
       }
@@ -143,7 +141,7 @@
       int memLevel = Read4(deflate_header + 52);
       int strategy = Read4(deflate_header + 56);
 
-      if (src_start + src_len > static_cast<size_t>(old_size)) {
+      if (src_start + src_len > old_size) {
         printf("source data too short\n");
         return -1;
       }
@@ -240,9 +238,9 @@
           strm.avail_out = temp_data.size();
           strm.next_out = temp_data.data();
           ret = deflate(&strm, Z_FINISH);
-          ssize_t have = temp_data.size() - strm.avail_out;
+          size_t have = temp_data.size() - strm.avail_out;
 
-          if (sink(temp_data.data(), have, token) != have) {
+          if (sink(temp_data.data(), have) != have) {
             printf("failed to write %zd compressed bytes to output\n", have);
             return -1;
           }
diff --git a/applypatch/include/applypatch/applypatch.h b/applypatch/include/applypatch/applypatch.h
index 4489dec..da55432 100644
--- a/applypatch/include/applypatch/applypatch.h
+++ b/applypatch/include/applypatch/applypatch.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <sys/stat.h>
 
+#include <functional>
 #include <memory>
 #include <string>
 #include <vector>
@@ -41,7 +42,7 @@
 // and use it as the source instead.
 #define CACHE_TEMP_SOURCE "/cache/saved.file"
 
-typedef ssize_t (*SinkFn)(const unsigned char*, ssize_t, void*);
+using SinkFn = std::function<size_t(const unsigned char*, size_t)>;
 
 // applypatch.cpp
 int ShowLicenses();
@@ -66,18 +67,14 @@
 
 // bspatch.cpp
 void ShowBSDiffLicense();
-int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
-                     const Value* patch, ssize_t patch_offset,
-                     SinkFn sink, void* token, SHA_CTX* ctx);
-int ApplyBSDiffPatchMem(const unsigned char* old_data, ssize_t old_size,
-                        const Value* patch, ssize_t patch_offset,
-                        std::vector<unsigned char>* new_data);
+int ApplyBSDiffPatch(const unsigned char* old_data, size_t old_size, const Value* patch,
+                     size_t patch_offset, SinkFn sink, SHA_CTX* ctx);
+int ApplyBSDiffPatchMem(const unsigned char* old_data, size_t old_size, const Value* patch,
+                        size_t patch_offset, std::vector<unsigned char>* new_data);
 
 // imgpatch.cpp
-int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
-                    const Value* patch,
-                    SinkFn sink, void* token, SHA_CTX* ctx,
-                    const Value* bonus_data);
+int ApplyImagePatch(const unsigned char* old_data, size_t old_size, const Value* patch, SinkFn sink,
+                    SHA_CTX* ctx, const Value* bonus_data);
 
 // freecache.cpp
 int MakeFreeSpaceOnCache(size_t bytes_needed);
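
Because SinkFn is now a std::function, state that used to travel through the void* token (for example the output fd in the old FileSink) can simply be captured. A hedged sketch of an fd-backed sink; plain write() is used here instead of the otafault wrappers, and the factory name is illustrative:

// Hedged sketch: build a SinkFn that writes to a file descriptor and returns the
// number of bytes actually written (a short return signals an error to the caller).
#include <unistd.h>

#include "applypatch/applypatch.h"

SinkFn MakeFdSink(int fd) {
  return [fd](const unsigned char* data, size_t len) {
    size_t done = 0;
    while (done < len) {
      ssize_t wrote = TEMP_FAILURE_RETRY(write(fd, data + done, len - done));
      if (wrote == -1) {
        return done;  // Short return: the caller treats this as a failed write.
      }
      done += wrote;
    }
    return done;
  };
}
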
diff --git a/applypatch/include/applypatch/imgpatch.h b/applypatch/include/applypatch/imgpatch.h
index 6549f79..07c6609 100644
--- a/applypatch/include/applypatch/imgpatch.h
+++ b/applypatch/include/applypatch/imgpatch.h
@@ -19,10 +19,11 @@
 
 #include <sys/types.h>
 
-using SinkFn = ssize_t (*)(const unsigned char*, ssize_t, void*);
+#include <functional>
 
-int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
-                    const unsigned char* patch_data, ssize_t patch_size,
-                    SinkFn sink, void* token);
+using SinkFn = std::function<size_t(const unsigned char*, size_t)>;
+
+int ApplyImagePatch(const unsigned char* old_data, size_t old_size, const unsigned char* patch_data,
+                    size_t patch_size, SinkFn sink);
 
 #endif  // _APPLYPATCH_IMGPATCH_H
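
The sink passed here must report how many bytes it consumed; ApplyImagePatch() treats a short return as a write failure (see the checks in imgpatch.cpp above). A minimal, hedged usage sketch of the raw-data overload, with an illustrative wrapper name:

// Hedged sketch: apply a raw imgdiff patch blob and collect the output in memory.
#include <vector>

#include "applypatch/imgpatch.h"

bool PatchImage(const std::vector<unsigned char>& src, const std::vector<unsigned char>& patch,
                std::vector<unsigned char>* out) {
  SinkFn sink = [out](const unsigned char* data, size_t len) {
    out->insert(out->end(), data, data + len);
    return len;
  };
  return ApplyImagePatch(src.data(), src.size(), patch.data(), patch.size(), sink) == 0;
}
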
diff --git a/edify/expr.cpp b/edify/expr.cpp
index 2b7fd7a..54ab332 100644
--- a/edify/expr.cpp
+++ b/edify/expr.cpp
@@ -357,7 +357,7 @@
     if (args == nullptr) {
         return false;
     }
-    if (len == 0 || start + len > argv.size()) {
+    if (start + len > argv.size()) {
         return false;
     }
     for (size_t i = start; i < start + len; ++i) {
diff --git a/install.cpp b/install.cpp
index db8fb97..7cef44a 100644
--- a/install.cpp
+++ b/install.cpp
@@ -546,17 +546,21 @@
     std::chrono::duration<double> duration = std::chrono::system_clock::now() - start;
     int time_total = static_cast<int>(duration.count());
 
-    if (ensure_path_mounted(UNCRYPT_STATUS) != 0) {
+    bool has_cache = volume_for_path("/cache") != nullptr;
+    // Skip logging the uncrypt_status on devices without /cache.
+    if (has_cache) {
+      if (ensure_path_mounted(UNCRYPT_STATUS) != 0) {
         LOG(WARNING) << "Can't mount " << UNCRYPT_STATUS;
-    } else {
+      } else {
         std::string uncrypt_status;
         if (!android::base::ReadFileToString(UNCRYPT_STATUS, &uncrypt_status)) {
-            PLOG(WARNING) << "failed to read uncrypt status";
+          PLOG(WARNING) << "failed to read uncrypt status";
         } else if (!android::base::StartsWith(uncrypt_status, "uncrypt_")) {
-            PLOG(WARNING) << "corrupted uncrypt_status: " << uncrypt_status;
+          LOG(WARNING) << "corrupted uncrypt_status: " << uncrypt_status;
         } else {
-            log_buffer.push_back(android::base::Trim(uncrypt_status));
+          log_buffer.push_back(android::base::Trim(uncrypt_status));
         }
+      }
     }
 
     // The first two lines need to be the package name and install result.
diff --git a/otafault/ota_io.cpp b/otafault/ota_io.cpp
index f5b0113..3a89bb5 100644
--- a/otafault/ota_io.cpp
+++ b/otafault/ota_io.cpp
@@ -89,7 +89,7 @@
     return fclose(fh);
 }
 
-void OtaFcloser::operator()(FILE* f) {
+void OtaFcloser::operator()(FILE* f) const {
     __ota_fclose(f);
 };
 
diff --git a/otafault/ota_io.h b/otafault/ota_io.h
index 395b423..9428f1b 100644
--- a/otafault/ota_io.h
+++ b/otafault/ota_io.h
@@ -59,7 +59,7 @@
 int ota_close(unique_fd& fd);
 
 struct OtaFcloser {
-  void operator()(FILE*);
+  void operator()(FILE*) const;
 };
 
 using unique_file = std::unique_ptr<FILE, OtaFcloser>;
diff --git a/recovery.cpp b/recovery.cpp
index a374eed..b24efa9 100644
--- a/recovery.cpp
+++ b/recovery.cpp
@@ -752,13 +752,15 @@
 
 static bool prompt_and_wipe_data(Device* device) {
   const char* const headers[] = {
-    "Boot halted, user data is corrupt",
-    "Wipe all user data to recover",
+    "Can't load Android system. Your data may be corrupt.",
+    "If you continue to get this message, you may need to",
+    "perform a factory data reset and erase all user data",
+    "stored on this device.",
     NULL
   };
   const char* const items[] = {
-    "Retry boot",
-    "Wipe user data",
+    "Try again",
+    "Factory data reset",
     NULL
   };
   for (;;) {
@@ -791,47 +793,45 @@
     return success;
 }
 
-// Secure-wipe a given partition. It uses BLKSECDISCARD, if supported.
-// Otherwise, it goes with BLKDISCARD (if device supports BLKDISCARDZEROES) or
-// BLKZEROOUT.
+// Secure-wipe a given partition. It uses BLKSECDISCARD, if supported. Otherwise, it goes with
+// BLKDISCARD (if device supports BLKDISCARDZEROES) or BLKZEROOUT.
 static bool secure_wipe_partition(const std::string& partition) {
-    android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(partition.c_str(), O_WRONLY)));
-    if (fd == -1) {
-        PLOG(ERROR) << "failed to open \"" << partition << "\"";
+  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(partition.c_str(), O_WRONLY)));
+  if (fd == -1) {
+    PLOG(ERROR) << "Failed to open \"" << partition << "\"";
+    return false;
+  }
+
+  uint64_t range[2] = { 0, 0 };
+  if (ioctl(fd, BLKGETSIZE64, &range[1]) == -1 || range[1] == 0) {
+    PLOG(ERROR) << "Failed to get partition size";
+    return false;
+  }
+  LOG(INFO) << "Secure-wiping \"" << partition << "\" from " << range[0] << " to " << range[1];
+
+  LOG(INFO) << "  Trying BLKSECDISCARD...";
+  if (ioctl(fd, BLKSECDISCARD, &range) == -1) {
+    PLOG(WARNING) << "  Failed";
+
+    // Use BLKDISCARD if it zeroes out blocks, otherwise use BLKZEROOUT.
+    unsigned int zeroes;
+    if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0 && zeroes != 0) {
+      LOG(INFO) << "  Trying BLKDISCARD...";
+      if (ioctl(fd, BLKDISCARD, &range) == -1) {
+        PLOG(ERROR) << "  Failed";
         return false;
-    }
-
-    uint64_t range[2] = {0, 0};
-    if (ioctl(fd, BLKGETSIZE64, &range[1]) == -1 || range[1] == 0) {
-        PLOG(ERROR) << "failed to get partition size";
+      }
+    } else {
+      LOG(INFO) << "  Trying BLKZEROOUT...";
+      if (ioctl(fd, BLKZEROOUT, &range) == -1) {
+        PLOG(ERROR) << "  Failed";
         return false;
+      }
     }
-    printf("Secure-wiping \"%s\" from %" PRIu64 " to %" PRIu64 ".\n",
-           partition.c_str(), range[0], range[1]);
+  }
 
-    printf("Trying BLKSECDISCARD...\t");
-    if (ioctl(fd, BLKSECDISCARD, &range) == -1) {
-        printf("failed: %s\n", strerror(errno));
-
-        // Use BLKDISCARD if it zeroes out blocks, otherwise use BLKZEROOUT.
-        unsigned int zeroes;
-        if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0 && zeroes != 0) {
-            printf("Trying BLKDISCARD...\t");
-            if (ioctl(fd, BLKDISCARD, &range) == -1) {
-                printf("failed: %s\n", strerror(errno));
-                return false;
-            }
-        } else {
-            printf("Trying BLKZEROOUT...\t");
-            if (ioctl(fd, BLKZEROOUT, &range) == -1) {
-                printf("failed: %s\n", strerror(errno));
-                return false;
-            }
-        }
-    }
-
-    printf("done\n");
-    return true;
+  LOG(INFO) << "  Done";
+  return true;
 }
 
 // Check if the wipe package matches expectation:
@@ -863,7 +863,7 @@
         return false;
     }
     std::string metadata;
-    if (!read_metadata_from_package(&zip, &metadata)) {
+    if (!read_metadata_from_package(zip, &metadata)) {
         CloseArchive(zip);
         return false;
     }
diff --git a/screen_ui.cpp b/screen_ui.cpp
index 706877b..bb2772d 100644
--- a/screen_ui.cpp
+++ b/screen_ui.cpp
@@ -98,7 +98,7 @@
     }
 }
 
-int ScreenRecoveryUI::PixelsFromDp(int dp) {
+int ScreenRecoveryUI::PixelsFromDp(int dp) const {
     return dp * density_;
 }
 
@@ -256,12 +256,12 @@
     *y += 4;
 }
 
-void ScreenRecoveryUI::DrawTextLine(int x, int* y, const char* line, bool bold) {
+void ScreenRecoveryUI::DrawTextLine(int x, int* y, const char* line, bool bold) const {
     gr_text(gr_sys_font(), x, *y, line, bold);
     *y += char_height_ + 4;
 }
 
-void ScreenRecoveryUI::DrawTextLines(int x, int* y, const char* const* lines) {
+void ScreenRecoveryUI::DrawTextLines(int x, int* y, const char* const* lines) const {
     for (size_t i = 0; lines != nullptr && lines[i] != nullptr; ++i) {
         DrawTextLine(x, y, lines[i], false);
     }
diff --git a/screen_ui.h b/screen_ui.h
index b2dcf4a..a2322c3 100644
--- a/screen_ui.h
+++ b/screen_ui.h
@@ -160,14 +160,14 @@
     void LoadBitmap(const char* filename, GRSurface** surface);
     void LoadLocalizedBitmap(const char* filename, GRSurface** surface);
 
-    int PixelsFromDp(int dp);
+    int PixelsFromDp(int dp) const;
     virtual int GetAnimationBaseline();
     virtual int GetProgressBaseline();
     virtual int GetTextBaseline();
 
     void DrawHorizontalRule(int* y);
-    void DrawTextLine(int x, int* y, const char* line, bool bold);
-    void DrawTextLines(int x, int* y, const char* const* lines);
+    void DrawTextLine(int x, int* y, const char* line, bool bold) const;
+    void DrawTextLines(int x, int* y, const char* const* lines) const;
 };
 
 #endif  // RECOVERY_UI_H
diff --git a/tests/component/applypatch_test.cpp b/tests/component/applypatch_test.cpp
index d178303..5cba68f 100644
--- a/tests/component/applypatch_test.cpp
+++ b/tests/component/applypatch_test.cpp
@@ -280,66 +280,6 @@
   ASSERT_NE(0, applypatch_check(&old_file[0], sha1s));
 }
 
-TEST_F(ApplyPatchFullTest, ApplyInPlace) {
-  std::vector<std::string> sha1s = { bad_sha1_a, old_sha1 };
-  ASSERT_EQ(0, applypatch(&old_file[0], "-", &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_TRUE(file_cmp(old_file, new_file));
-
-  // reapply, applypatch is idempotent so it should succeed
-  ASSERT_EQ(0, applypatch(&old_file[0], "-", &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_TRUE(file_cmp(old_file, new_file));
-}
-
-TEST_F(ApplyPatchFullTest, ApplyInNewLocation) {
-  std::vector<std::string> sha1s = { bad_sha1_a, old_sha1 };
-  // Apply bsdiff patch to new location.
-  ASSERT_EQ(
-      0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_TRUE(file_cmp(output_loc, new_file));
-
-  // Reapply to the same location.
-  ASSERT_EQ(
-      0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_TRUE(file_cmp(output_loc, new_file));
-}
-
-TEST_F(ApplyPatchFullTest, ApplyCorruptedInNewLocation) {
-  std::vector<std::string> sha1s = { bad_sha1_a, old_sha1 };
-  // Apply bsdiff patch to new location with corrupted source.
-  mangle_file(old_file);
-  ASSERT_EQ(
-      0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_TRUE(file_cmp(output_loc, new_file));
-
-  // Reapply bsdiff patch to new location with corrupted source.
-  ASSERT_EQ(
-      0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_TRUE(file_cmp(output_loc, new_file));
-}
-
-TEST_F(ApplyPatchDoubleCacheTest, ApplyDoubleCorruptedInNewLocation) {
-  std::vector<std::string> sha1s = { bad_sha1_a, old_sha1 };
-
-  // Apply bsdiff patch to new location with corrupted source and copy (no new file).
-  // Expected to fail.
-  mangle_file(old_file);
-  mangle_file(cache_file);
-  ASSERT_NE(
-      0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_FALSE(file_cmp(output_loc, new_file));
-
-  // Expected to fail again on retry.
-  ASSERT_NE(
-      0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_FALSE(file_cmp(output_loc, new_file));
-
-  // Expected to fail with incorrect new file.
-  mangle_file(output_loc);
-  ASSERT_NE(
-      0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr));
-  ASSERT_FALSE(file_cmp(output_loc, new_file));
-}
-
 TEST(ApplyPatchModesTest, InvalidArgs) {
   // At least two args (including the filename).
   ASSERT_EQ(2, applypatch_modes(1, (const char* []){ "applypatch" }));
@@ -348,70 +288,6 @@
   ASSERT_EQ(2, applypatch_modes(2, (const char* []){ "applypatch", "-x" }));
 }
 
-TEST(ApplyPatchModesTest, PatchMode) {
-  std::string boot_img = from_testdata_base("boot.img");
-  size_t boot_img_size;
-  std::string boot_img_sha1;
-  sha1sum(boot_img, &boot_img_sha1, &boot_img_size);
-
-  std::string recovery_img = from_testdata_base("recovery.img");
-  std::string recovery_img_sha1;
-  size_t size;
-  sha1sum(recovery_img, &recovery_img_sha1, &size);
-  std::string recovery_img_size = std::to_string(size);
-  std::string bonus_file = from_testdata_base("bonus.file");
-
-  // applypatch -b <bonus-file> <src-file> <tgt-file> <tgt-sha1> <tgt-size> <src-sha1>:<patch>
-  TemporaryFile tmp1;
-  std::string patch = boot_img_sha1 + ":" + from_testdata_base("recovery-from-boot.p");
-  std::vector<const char*> args = {
-    "applypatch",
-    "-b",
-    bonus_file.c_str(),
-    boot_img.c_str(),
-    tmp1.path,
-    recovery_img_sha1.c_str(),
-    recovery_img_size.c_str(),
-    patch.c_str()
-  };
-  ASSERT_EQ(0, applypatch_modes(args.size(), args.data()));
-
-  // applypatch <src-file> <tgt-file> <tgt-sha1> <tgt-size> <src-sha1>:<patch>
-  TemporaryFile tmp2;
-  patch = boot_img_sha1 + ":" + from_testdata_base("recovery-from-boot-with-bonus.p");
-  std::vector<const char*> args2 = {
-    "applypatch",
-    boot_img.c_str(),
-    tmp2.path,
-    recovery_img_sha1.c_str(),
-    recovery_img_size.c_str(),
-    patch.c_str()
-  };
-  ASSERT_EQ(0, applypatch_modes(args2.size(), args2.data()));
-
-  // applypatch -b <bonus-file> <src-file> <tgt-file> <tgt-sha1> <tgt-size> \
-  //               <src-sha1-fake>:<patch1> <src-sha1>:<patch2>
-  TemporaryFile tmp3;
-  std::string bad_sha1_a = android::base::StringPrintf("%040x", rand());
-  std::string bad_sha1_b = android::base::StringPrintf("%040x", rand());
-  std::string patch1 = bad_sha1_a + ":" + from_testdata_base("recovery-from-boot.p");
-  std::string patch2 = boot_img_sha1 + ":" + from_testdata_base("recovery-from-boot.p");
-  std::string patch3 = bad_sha1_b + ":" + from_testdata_base("recovery-from-boot.p");
-  std::vector<const char*> args3 = {
-    "applypatch",
-    "-b",
-    bonus_file.c_str(),
-    boot_img.c_str(),
-    tmp3.path,
-    recovery_img_sha1.c_str(),
-    recovery_img_size.c_str(),
-    patch1.c_str(),
-    patch2.c_str(),
-    patch3.c_str()
-  };
-  ASSERT_EQ(0, applypatch_modes(args3.size(), args3.data()));
-}
-
 TEST(ApplyPatchModesTest, PatchModeEmmcTarget) {
   std::string boot_img = from_testdata_base("boot.img");
   size_t boot_img_size;
diff --git a/tests/component/imgdiff_test.cpp b/tests/component/imgdiff_test.cpp
index 2f64850..7d00a3d 100644
--- a/tests/component/imgdiff_test.cpp
+++ b/tests/component/imgdiff_test.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <stdio.h>
+
 #include <string>
 #include <vector>
 
@@ -27,12 +29,6 @@
 
 using android::base::get_unaligned;
 
-static ssize_t MemorySink(const unsigned char* data, ssize_t len, void* token) {
-  std::string* s = static_cast<std::string*>(token);
-  s->append(reinterpret_cast<const char*>(data), len);
-  return len;
-}
-
 // Sanity check for the given imgdiff patch header.
 static void verify_patch_header(const std::string& patch, size_t* num_normal, size_t* num_raw,
                                 size_t* num_deflate) {
@@ -79,6 +75,18 @@
   if (num_deflate != nullptr) *num_deflate = deflate;
 }
 
+static void verify_patched_image(const std::string& src, const std::string& patch,
+                                 const std::string& tgt) {
+  std::string patched;
+  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
+                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
+                               [&patched](const unsigned char* data, size_t len) {
+                                 patched.append(reinterpret_cast<const char*>(data), len);
+                                 return len;
+                               }));
+  ASSERT_EQ(tgt, patched);
+}
+
 TEST(ImgdiffTest, invalid_args) {
   // Insufficient inputs.
   ASSERT_EQ(2, imgdiff(1, (const char* []){ "imgdiff" }));
@@ -124,11 +132,7 @@
   ASSERT_EQ(0U, num_deflate);
   ASSERT_EQ(1U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, zip_mode_smoke_store) {
@@ -177,11 +181,7 @@
   ASSERT_EQ(0U, num_deflate);
   ASSERT_EQ(1U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, zip_mode_smoke_compressed) {
@@ -230,11 +230,7 @@
   ASSERT_EQ(1U, num_deflate);
   ASSERT_EQ(2U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, zip_mode_smoke_trailer_zeros) {
@@ -286,11 +282,7 @@
   ASSERT_EQ(1U, num_deflate);
   ASSERT_EQ(2U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, image_mode_simple) {
@@ -333,11 +325,7 @@
   ASSERT_EQ(1U, num_deflate);
   ASSERT_EQ(2U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, image_mode_different_num_chunks) {
@@ -413,11 +401,7 @@
   ASSERT_EQ(1U, num_deflate);
   ASSERT_EQ(2U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, image_mode_spurious_magic) {
@@ -454,11 +438,7 @@
   ASSERT_EQ(0U, num_deflate);
   ASSERT_EQ(1U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, image_mode_short_input1) {
@@ -494,11 +474,7 @@
   ASSERT_EQ(0U, num_deflate);
   ASSERT_EQ(1U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, image_mode_short_input2) {
@@ -534,11 +510,7 @@
   ASSERT_EQ(0U, num_deflate);
   ASSERT_EQ(1U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
 
 TEST(ImgdiffTest, image_mode_single_entry_long) {
@@ -577,9 +549,5 @@
   ASSERT_EQ(0U, num_deflate);
   ASSERT_EQ(0U, num_raw);
 
-  std::string patched;
-  ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
-                               reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
-                               MemorySink, &patched));
-  ASSERT_EQ(tgt, patched);
+  verify_patched_image(src, patch, tgt);
 }
diff --git a/ui.cpp b/ui.cpp
index a796461..9194ae3 100644
--- a/ui.cpp
+++ b/ui.cpp
@@ -240,7 +240,7 @@
 }
 
 void* RecoveryUI::time_key_helper(void* cookie) {
-    key_timer_t* info = (key_timer_t*) cookie;
+    key_timer_t* info = static_cast<key_timer_t*>(cookie);
     info->ui->time_key(info->key_code, info->count);
     delete info;
     return nullptr;
diff --git a/updater/blockimg.cpp b/updater/blockimg.cpp
index 12ca151..8c0f885 100644
--- a/updater/blockimg.cpp
+++ b/updater/blockimg.cpp
@@ -240,57 +240,54 @@
     size_t p_remain;
 };
 
-static ssize_t RangeSinkWrite(const uint8_t* data, ssize_t size, void* token) {
-    RangeSinkState* rss = reinterpret_cast<RangeSinkState*>(token);
+static size_t RangeSinkWrite(const uint8_t* data, size_t size, RangeSinkState* rss) {
+  if (rss->p_remain == 0) {
+    LOG(ERROR) << "range sink write overrun";
+    return 0;
+  }
+
+  size_t written = 0;
+  while (size > 0) {
+    size_t write_now = size;
+
+    if (rss->p_remain < write_now) {
+      write_now = rss->p_remain;
+    }
+
+    if (write_all(rss->fd, data, write_now) == -1) {
+      break;
+    }
+
+    data += write_now;
+    size -= write_now;
+
+    rss->p_remain -= write_now;
+    written += write_now;
 
     if (rss->p_remain == 0) {
-        LOG(ERROR) << "range sink write overrun";
-        return 0;
+      // Move to the next block.
+      ++rss->p_block;
+      if (rss->p_block < rss->tgt.count) {
+        rss->p_remain =
+            (rss->tgt.pos[rss->p_block * 2 + 1] - rss->tgt.pos[rss->p_block * 2]) * BLOCKSIZE;
+
+        off64_t offset = static_cast<off64_t>(rss->tgt.pos[rss->p_block * 2]) * BLOCKSIZE;
+        if (!discard_blocks(rss->fd, offset, rss->p_remain)) {
+          break;
+        }
+
+        if (!check_lseek(rss->fd, offset, SEEK_SET)) {
+          break;
+        }
+
+      } else {
+        // We can't write any more; return how many bytes have been written so far.
+        break;
+      }
     }
+  }
 
-    ssize_t written = 0;
-    while (size > 0) {
-        size_t write_now = size;
-
-        if (rss->p_remain < write_now) {
-            write_now = rss->p_remain;
-        }
-
-        if (write_all(rss->fd, data, write_now) == -1) {
-            break;
-        }
-
-        data += write_now;
-        size -= write_now;
-
-        rss->p_remain -= write_now;
-        written += write_now;
-
-        if (rss->p_remain == 0) {
-            // move to the next block
-            ++rss->p_block;
-            if (rss->p_block < rss->tgt.count) {
-                rss->p_remain = (rss->tgt.pos[rss->p_block * 2 + 1] -
-                                 rss->tgt.pos[rss->p_block * 2]) * BLOCKSIZE;
-
-                off64_t offset = static_cast<off64_t>(rss->tgt.pos[rss->p_block*2]) * BLOCKSIZE;
-                if (!discard_blocks(rss->fd, offset, rss->p_remain)) {
-                    break;
-                }
-
-                if (!check_lseek(rss->fd, offset, SEEK_SET)) {
-                    break;
-                }
-
-            } else {
-                // we can't write any more; return how many bytes have
-                // been written so far.
-                break;
-            }
-        }
-    }
-
-    return written;
+  return written;
 }
 
 // All of the data for all the 'new' transfers is contained in one
@@ -338,7 +335,7 @@
 
         // At this point nti->rss is set, and we own it.  The main
         // thread is waiting for it to disappear from nti.
-        ssize_t written = RangeSinkWrite(data, size, nti->rss);
+        size_t written = RangeSinkWrite(data, size, nti->rss);
         data += written;
         size -= written;
 
@@ -356,7 +353,7 @@
 }
 
 static void* unzip_new_data(void* cookie) {
-    NewThreadInfo* nti = (NewThreadInfo*) cookie;
+    NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie);
     ProcessZipEntryContents(nti->za, &nti->entry, receive_new_data, nti);
     return nullptr;
 }
@@ -429,46 +426,11 @@
     uint8_t* patch_start;
 };
 
-// Do a source/target load for move/bsdiff/imgdiff in version 1.
-// We expect to parse the remainder of the parameter tokens as:
-//
-//    <src_range> <tgt_range>
-//
-// The source range is loaded into the provided buffer, reallocating
-// it to make it larger if necessary.
-
-static int LoadSrcTgtVersion1(CommandParameters& params, RangeSet& tgt, size_t& src_blocks,
-        std::vector<uint8_t>& buffer, int fd) {
-
-    if (params.cpos + 1 >= params.tokens.size()) {
-        LOG(ERROR) << "invalid parameters";
-        return -1;
-    }
-
-    // <src_range>
-    RangeSet src = parse_range(params.tokens[params.cpos++]);
-
-    // <tgt_range>
-    tgt = parse_range(params.tokens[params.cpos++]);
-
-    allocate(src.size * BLOCKSIZE, buffer);
-    int rc = ReadBlocks(src, buffer, fd);
-    src_blocks = src.size;
-
-    return rc;
-}
-
 // Print the hash in hex for corrupted source blocks (excluding the stashed blocks which is
 // handled separately).
 static void PrintHashForCorruptedSourceBlocks(const CommandParameters& params,
                                               const std::vector<uint8_t>& buffer) {
   LOG(INFO) << "unexpected contents of source blocks in cmd:\n" << params.cmdline;
-  if (params.version < 3) {
-    // TODO handle version 1,2
-    LOG(WARNING) << "version number " << params.version << " is not supported to print hashes";
-    return;
-  }
-
   CHECK(params.tokens[0] == "move" || params.tokens[0] == "bsdiff" ||
         params.tokens[0] == "imgdiff");
 
@@ -651,8 +613,8 @@
   }
 }
 
-static int LoadStash(CommandParameters& params, const std::string& base, const std::string& id,
-        bool verify, size_t* blocks, std::vector<uint8_t>& buffer, bool printnoent) {
+static int LoadStash(CommandParameters& params, const std::string& id, bool verify, size_t* blocks,
+                     std::vector<uint8_t>& buffer, bool printnoent) {
     // In verify mode, if source range_set was saved for the given hash,
     // check contents in the source blocks first. If the check fails,
     // search for the stashed files on /cache as usual.
@@ -674,17 +636,13 @@
         }
     }
 
-    if (base.empty()) {
-        return -1;
-    }
-
     size_t blockcount = 0;
 
     if (!blocks) {
         blocks = &blockcount;
     }
 
-    std::string fn = GetStashFileName(base, id, "");
+    std::string fn = GetStashFileName(params.stashbase, id, "");
 
     struct stat sb;
     int res = stat(fn.c_str(), &sb);
@@ -735,7 +693,7 @@
 }
 
 static int WriteStash(const std::string& base, const std::string& id, int blocks,
-        std::vector<uint8_t>& buffer, bool checkspace, bool *exists) {
+                      std::vector<uint8_t>& buffer, bool checkspace, bool* exists) {
     if (base.empty()) {
         return -1;
     }
@@ -895,52 +853,6 @@
   return 0;  // Using existing directory
 }
 
-static int SaveStash(CommandParameters& params, const std::string& base,
-        std::vector<uint8_t>& buffer, int fd, bool usehash) {
-
-    // <stash_id> <src_range>
-    if (params.cpos + 1 >= params.tokens.size()) {
-        LOG(ERROR) << "missing id and/or src range fields in stash command";
-        return -1;
-    }
-    const std::string& id = params.tokens[params.cpos++];
-
-    size_t blocks = 0;
-    if (usehash && LoadStash(params, base, id, true, &blocks, buffer, false) == 0) {
-        // Stash file already exists and has expected contents. Do not
-        // read from source again, as the source may have been already
-        // overwritten during a previous attempt.
-        return 0;
-    }
-
-    RangeSet src = parse_range(params.tokens[params.cpos++]);
-
-    allocate(src.size * BLOCKSIZE, buffer);
-    if (ReadBlocks(src, buffer, fd) == -1) {
-        return -1;
-    }
-    blocks = src.size;
-    stash_map[id] = src;
-
-    if (usehash && VerifyBlocks(id, buffer, blocks, true) != 0) {
-        // Source blocks have unexpected contents. If we actually need this
-        // data later, this is an unrecoverable error. However, the command
-        // that uses the data may have already completed previously, so the
-        // possible failure will occur during source block verification.
-        LOG(ERROR) << "failed to load source blocks for stash " << id;
-        return 0;
-    }
-
-    // In verify mode, we don't need to stash any blocks.
-    if (!params.canwrite && usehash) {
-        return 0;
-    }
-
-    LOG(INFO) << "stashing " << blocks << " blocks to " << id;
-    params.stashed += blocks;
-    return WriteStash(base, id, blocks, buffer, false, nullptr);
-}
-
 static int FreeStash(const std::string& base, const std::string& id) {
   if (base.empty() || id.empty()) {
     return -1;
@@ -968,265 +880,294 @@
     }
 }
 
-// Do a source/target load for move/bsdiff/imgdiff in version 2.
-// We expect to parse the remainder of the parameter tokens as one of:
-//
-//    <tgt_range> <src_block_count> <src_range>
-//        (loads data from source image only)
-//
-//    <tgt_range> <src_block_count> - <[stash_id:stash_range] ...>
-//        (loads data from stashes only)
-//
-//    <tgt_range> <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...>
-//        (loads data from both source image and stashes)
-//
-// On return, buffer is filled with the loaded source data (rearranged
-// and combined with stashed data as necessary).  buffer may be
-// reallocated if needed to accommodate the source data.  *tgt is the
-// target RangeSet.  Any stashes required are loaded using LoadStash.
+/**
+ * We expect to parse the remainder of the parameter tokens as one of:
+ *
+ *    <src_block_count> <src_range>
+ *        (loads data from source image only)
+ *
+ *    <src_block_count> - <[stash_id:stash_range] ...>
+ *        (loads data from stashes only)
+ *
+ *    <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...>
+ *        (loads data from both source image and stashes)
+ *
+ * On return, params.buffer is filled with the loaded source data (rearranged and combined with
+ * stashed data as necessary). buffer may be reallocated if needed to accommodate the source data.
+ * tgt is the target RangeSet for detecting overlaps. Any stashes required are loaded using
+ * LoadStash.
+ */
+static int LoadSourceBlocks(CommandParameters& params, const RangeSet& tgt, size_t* src_blocks,
+                            bool* overlap) {
+  CHECK(src_blocks != nullptr);
+  CHECK(overlap != nullptr);
 
-static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& src_blocks,
-        std::vector<uint8_t>& buffer, int fd, const std::string& stashbase, bool* overlap) {
+  // <src_block_count>
+  const std::string& token = params.tokens[params.cpos++];
+  if (!android::base::ParseUint(token, src_blocks)) {
+    LOG(ERROR) << "invalid src_block_count \"" << token << "\"";
+    return -1;
+  }
 
-    // At least it needs to provide three parameters: <tgt_range>,
-    // <src_block_count> and "-"/<src_range>.
-    if (params.cpos + 2 >= params.tokens.size()) {
-        LOG(ERROR) << "invalid parameters";
-        return -1;
+  allocate(*src_blocks * BLOCKSIZE, params.buffer);
+
+  // "-" or <src_range> [<src_loc>]
+  if (params.tokens[params.cpos] == "-") {
+    // no source ranges, only stashes
+    params.cpos++;
+  } else {
+    RangeSet src = parse_range(params.tokens[params.cpos++]);
+    *overlap = range_overlaps(src, tgt);
+
+    if (ReadBlocks(src, params.buffer, params.fd) == -1) {
+      return -1;
     }
 
-    // <tgt_range>
-    tgt = parse_range(params.tokens[params.cpos++]);
-
-    // <src_block_count>
-    const std::string& token = params.tokens[params.cpos++];
-    if (!android::base::ParseUint(token.c_str(), &src_blocks)) {
-        LOG(ERROR) << "invalid src_block_count \"" << token << "\"";
-        return -1;
-    }
-
-    allocate(src_blocks * BLOCKSIZE, buffer);
-
-    // "-" or <src_range> [<src_loc>]
-    if (params.tokens[params.cpos] == "-") {
-        // no source ranges, only stashes
-        params.cpos++;
-    } else {
-        RangeSet src = parse_range(params.tokens[params.cpos++]);
-        int res = ReadBlocks(src, buffer, fd);
-
-        if (overlap) {
-            *overlap = range_overlaps(src, tgt);
-        }
-
-        if (res == -1) {
-            return -1;
-        }
-
-        if (params.cpos >= params.tokens.size()) {
-            // no stashes, only source range
-            return 0;
-        }
-
-        RangeSet locs = parse_range(params.tokens[params.cpos++]);
-        MoveRange(buffer, locs, buffer);
-    }
-
-    // <[stash_id:stash_range]>
-    while (params.cpos < params.tokens.size()) {
-        // Each word is a an index into the stash table, a colon, and
-        // then a rangeset describing where in the source block that
-        // stashed data should go.
-        std::vector<std::string> tokens = android::base::Split(params.tokens[params.cpos++], ":");
-        if (tokens.size() != 2) {
-            LOG(ERROR) << "invalid parameter";
-            return -1;
-        }
-
-        std::vector<uint8_t> stash;
-        int res = LoadStash(params, stashbase, tokens[0], false, nullptr, stash, true);
-
-        if (res == -1) {
-            // These source blocks will fail verification if used later, but we
-            // will let the caller decide if this is a fatal failure
-            LOG(ERROR) << "failed to load stash " << tokens[0];
-            continue;
-        }
-
-        RangeSet locs = parse_range(tokens[1]);
-
-        MoveRange(buffer, locs, stash);
-    }
-
-    return 0;
-}
-
-// Do a source/target load for move/bsdiff/imgdiff in version 3.
-//
-// Parameters are the same as for LoadSrcTgtVersion2, except for 'onehash', which
-// tells the function whether to expect separate source and targe block hashes, or
-// if they are both the same and only one hash should be expected, and
-// 'isunresumable', which receives a non-zero value if block verification fails in
-// a way that the update cannot be resumed anymore.
-//
-// If the function is unable to load the necessary blocks or their contents don't
-// match the hashes, the return value is -1 and the command should be aborted.
-//
-// If the return value is 1, the command has already been completed according to
-// the contents of the target blocks, and should not be performed again.
-//
-// If the return value is 0, source blocks have expected content and the command
-// can be performed.
-
-static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& src_blocks,
-        bool onehash, bool& overlap) {
-
     if (params.cpos >= params.tokens.size()) {
-        LOG(ERROR) << "missing source hash";
-        return -1;
+      // no stashes, only source range
+      return 0;
     }
 
-    std::string srchash = params.tokens[params.cpos++];
-    std::string tgthash;
+    RangeSet locs = parse_range(params.tokens[params.cpos++]);
+    MoveRange(params.buffer, locs, params.buffer);
+  }
 
-    if (onehash) {
-        tgthash = srchash;
-    } else {
-        if (params.cpos >= params.tokens.size()) {
-            LOG(ERROR) << "missing target hash";
-            return -1;
-        }
-        tgthash = params.tokens[params.cpos++];
+  // <[stash_id:stash_range]>
+  while (params.cpos < params.tokens.size()) {
+    // Each word is an index into the stash table, a colon, and then a RangeSet describing where
+    // in the source block that stashed data should go.
+    std::vector<std::string> tokens = android::base::Split(params.tokens[params.cpos++], ":");
+    if (tokens.size() != 2) {
+      LOG(ERROR) << "invalid parameter";
+      return -1;
     }
 
-    if (LoadSrcTgtVersion2(params, tgt, src_blocks, params.buffer, params.fd,
-                           params.stashbase, &overlap) == -1) {
-        return -1;
+    std::vector<uint8_t> stash;
+    if (LoadStash(params, tokens[0], false, nullptr, stash, true) == -1) {
+      // These source blocks will fail verification if used later, but we
+      // will let the caller decide if this is a fatal failure
+      LOG(ERROR) << "failed to load stash " << tokens[0];
+      continue;
     }
 
-    std::vector<uint8_t> tgtbuffer(tgt.size * BLOCKSIZE);
+    RangeSet locs = parse_range(tokens[1]);
+    MoveRange(params.buffer, locs, stash);
+  }
 
-    if (ReadBlocks(tgt, tgtbuffer, params.fd) == -1) {
-        return -1;
-    }
+  return 0;
+}
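
// Illustrative sketch only (not part of this patch): how a "<stash_id>:<stash_range>" word from a
// source-load command splits into its two halves, mirroring the Split-on-":" step in
// LoadSourceBlocks() above. The stash id and range below are invented; real ids are SHA-1 hex
// digests of the stashed block contents.
#include <iostream>
#include <string>

int main() {
  std::string word = "9eae6cbd:2,0,2";  // hypothetical stash_id:stash_range token
  std::string::size_type colon = word.find(':');
  if (colon == std::string::npos) {
    std::cerr << "invalid parameter\n";
    return 1;
  }
  std::string stash_id = word.substr(0, colon);      // key used to locate the stash file
  std::string stash_range = word.substr(colon + 1);  // where the stashed data lands in params.buffer
  std::cout << "load stash " << stash_id << " into source buffer range " << stash_range << "\n";
  return 0;
}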
 
-    if (VerifyBlocks(tgthash, tgtbuffer, tgt.size, false) == 0) {
-        // Target blocks already have expected content, command should be skipped
-        return 1;
-    }
+/**
+ * Do a source/target load for move/bsdiff/imgdiff in version 3.
+ *
+ * We expect to parse the remainder of the parameter tokens as one of:
+ *
+ *    <tgt_range> <src_block_count> <src_range>
+ *        (loads data from source image only)
+ *
+ *    <tgt_range> <src_block_count> - <[stash_id:stash_range] ...>
+ *        (loads data from stashes only)
+ *
+ *    <tgt_range> <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...>
+ *        (loads data from both source image and stashes)
+ *
+ * 'onehash' tells whether to expect separate source and target block hashes, or if they are both the
+ * same and only one hash should be expected. params.isunresumable will be set to true if block
+ * verification fails in a way that the update cannot be resumed anymore.
+ *
+ * If the function is unable to load the necessary blocks or their contents don't match the hashes,
+ * the return value is -1 and the command should be aborted.
+ *
+ * If the return value is 1, the command has already been completed according to the contents of the
+ * target blocks, and should not be performed again.
+ *
+ * If the return value is 0, source blocks have expected content and the command can be performed.
+ */
+static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t* src_blocks,
+                              bool onehash, bool* overlap) {
+  CHECK(src_blocks != nullptr);
+  CHECK(overlap != nullptr);
 
-    if (VerifyBlocks(srchash, params.buffer, src_blocks, true) == 0) {
-        // If source and target blocks overlap, stash the source blocks so we can
-        // resume from possible write errors. In verify mode, we can skip stashing
-        // because the source blocks won't be overwritten.
-        if (overlap && params.canwrite) {
-            LOG(INFO) << "stashing " << src_blocks << " overlapping blocks to " << srchash;
-
-            bool stash_exists = false;
-            if (WriteStash(params.stashbase, srchash, src_blocks, params.buffer, true,
-                           &stash_exists) != 0) {
-                LOG(ERROR) << "failed to stash overlapping source blocks";
-                return -1;
-            }
-
-            params.stashed += src_blocks;
-            // Can be deleted when the write has completed
-            if (!stash_exists) {
-                params.freestash = srchash;
-            }
-        }
-
-        // Source blocks have expected content, command can proceed
-        return 0;
-    }
-
-    if (overlap && LoadStash(params, params.stashbase, srchash, true, nullptr, params.buffer,
-                             true) == 0) {
-        // Overlapping source blocks were previously stashed, command can proceed.
-        // We are recovering from an interrupted command, so we don't know if the
-        // stash can safely be deleted after this command.
-        return 0;
-    }
-
-    // Valid source data not available, update cannot be resumed
-    LOG(ERROR) << "partition has unexpected contents";
-    PrintHashForCorruptedSourceBlocks(params, params.buffer);
-
-    params.isunresumable = true;
-
+  if (params.cpos >= params.tokens.size()) {
+    LOG(ERROR) << "missing source hash";
     return -1;
+  }
+
+  std::string srchash = params.tokens[params.cpos++];
+  std::string tgthash;
+
+  if (onehash) {
+    tgthash = srchash;
+  } else {
+    if (params.cpos >= params.tokens.size()) {
+      LOG(ERROR) << "missing target hash";
+      return -1;
+    }
+    tgthash = params.tokens[params.cpos++];
+  }
+
+  // There need to be at least three more parameters: <tgt_range>, <src_block_count> and
+  // "-"/<src_range>.
+  if (params.cpos + 2 >= params.tokens.size()) {
+    LOG(ERROR) << "invalid parameters";
+    return -1;
+  }
+
+  // <tgt_range>
+  tgt = parse_range(params.tokens[params.cpos++]);
+
+  std::vector<uint8_t> tgtbuffer(tgt.size * BLOCKSIZE);
+  if (ReadBlocks(tgt, tgtbuffer, params.fd) == -1) {
+    return -1;
+  }
+
+  // Return now if target blocks already have expected content.
+  if (VerifyBlocks(tgthash, tgtbuffer, tgt.size, false) == 0) {
+    return 1;
+  }
+
+  // Load source blocks.
+  if (LoadSourceBlocks(params, tgt, src_blocks, overlap) == -1) {
+    return -1;
+  }
+
+  if (VerifyBlocks(srchash, params.buffer, *src_blocks, true) == 0) {
+    // If source and target blocks overlap, stash the source blocks so we can
+    // resume from possible write errors. In verify mode, we can skip stashing
+    // because the source blocks won't be overwritten.
+    if (*overlap && params.canwrite) {
+      LOG(INFO) << "stashing " << *src_blocks << " overlapping blocks to " << srchash;
+
+      bool stash_exists = false;
+      if (WriteStash(params.stashbase, srchash, *src_blocks, params.buffer, true,
+                     &stash_exists) != 0) {
+        LOG(ERROR) << "failed to stash overlapping source blocks";
+        return -1;
+      }
+
+      params.stashed += *src_blocks;
+      // Can be deleted when the write has completed.
+      if (!stash_exists) {
+        params.freestash = srchash;
+      }
+    }
+
+    // Source blocks have expected content, command can proceed.
+    return 0;
+  }
+
+  if (*overlap && LoadStash(params, srchash, true, nullptr, params.buffer, true) == 0) {
+    // Overlapping source blocks were previously stashed, command can proceed. We are recovering
+    // from an interrupted command, so we don't know if the stash can safely be deleted after this
+    // command.
+    return 0;
+  }
+
+  // Valid source data not available, update cannot be resumed.
+  LOG(ERROR) << "partition has unexpected contents";
+  PrintHashForCorruptedSourceBlocks(params, params.buffer);
+
+  params.isunresumable = true;
+
+  return -1;
 }
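
// Illustrative sketch only (not part of this patch): the token order consumed by
// LoadSrcTgtVersion3() (with onehash == true, as used by "move") and LoadSourceBlocks(), following
// the format documented above. The hash and ranges are invented; real transfer lists are generated
// by the OTA tools and use full SHA-1 hex digests.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  // move <hash> <tgt_range> <src_block_count> <src_range>
  std::string line = "move 1a2b3c4d 2,545,546 1 2,544,545";
  std::vector<std::string> tokens;
  std::istringstream iss(line);
  for (std::string t; iss >> t;) tokens.push_back(t);

  size_t cpos = 1;  // tokens[0] is the command name
  std::cout << "hash (src == tgt for move): " << tokens[cpos++] << "\n"
            << "tgt_range:                  " << tokens[cpos++] << "\n"
            << "src_block_count:            " << tokens[cpos++] << "\n"
            << "src_range:                  " << tokens[cpos++] << "\n";
  return 0;
}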
 
 static int PerformCommandMove(CommandParameters& params) {
-    size_t blocks = 0;
-    bool overlap = false;
-    int status = 0;
-    RangeSet tgt;
+  size_t blocks = 0;
+  bool overlap = false;
+  RangeSet tgt;
+  int status = LoadSrcTgtVersion3(params, tgt, &blocks, true, &overlap);
 
-    if (params.version == 1) {
-        status = LoadSrcTgtVersion1(params, tgt, blocks, params.buffer, params.fd);
-    } else if (params.version == 2) {
-        status = LoadSrcTgtVersion2(params, tgt, blocks, params.buffer, params.fd,
-                params.stashbase, nullptr);
-    } else if (params.version >= 3) {
-        status = LoadSrcTgtVersion3(params, tgt, blocks, true, overlap);
-    }
+  if (status == -1) {
+    LOG(ERROR) << "failed to read blocks for move";
+    return -1;
+  }
 
-    if (status == -1) {
-        LOG(ERROR) << "failed to read blocks for move";
-        return -1;
-    }
+  if (status == 0) {
+    params.foundwrites = true;
+  } else if (params.foundwrites) {
+    LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]";
+  }
 
+  if (params.canwrite) {
     if (status == 0) {
-        params.foundwrites = true;
-    } else if (params.foundwrites) {
-        LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]";
+      LOG(INFO) << "  moving " << blocks << " blocks";
+
+      if (WriteBlocks(tgt, params.buffer, params.fd) == -1) {
+        return -1;
+      }
+    } else {
+      LOG(INFO) << "skipping " << blocks << " already moved blocks";
     }
+  }
 
-    if (params.canwrite) {
-        if (status == 0) {
-            LOG(INFO) << "  moving " << blocks << " blocks";
+  if (!params.freestash.empty()) {
+    FreeStash(params.stashbase, params.freestash);
+    params.freestash.clear();
+  }
 
-            if (WriteBlocks(tgt, params.buffer, params.fd) == -1) {
-                return -1;
-            }
-        } else {
-            LOG(INFO) << "skipping " << blocks << " already moved blocks";
-        }
+  params.written += tgt.size;
 
-    }
-
-    if (!params.freestash.empty()) {
-        FreeStash(params.stashbase, params.freestash);
-        params.freestash.clear();
-    }
-
-    params.written += tgt.size;
-
-    return 0;
+  return 0;
 }
 
 static int PerformCommandStash(CommandParameters& params) {
-    return SaveStash(params, params.stashbase, params.buffer, params.fd,
-            (params.version >= 3));
+  // <stash_id> <src_range>
+  if (params.cpos + 1 >= params.tokens.size()) {
+    LOG(ERROR) << "missing id and/or src range fields in stash command";
+    return -1;
+  }
+
+  const std::string& id = params.tokens[params.cpos++];
+  size_t blocks = 0;
+  if (LoadStash(params, id, true, &blocks, params.buffer, false) == 0) {
+    // Stash file already exists and has expected contents. Do not read from source again, as the
+    // source may have been already overwritten during a previous attempt.
+    return 0;
+  }
+
+  RangeSet src = parse_range(params.tokens[params.cpos++]);
+
+  allocate(src.size * BLOCKSIZE, params.buffer);
+  if (ReadBlocks(src, params.buffer, params.fd) == -1) {
+    return -1;
+  }
+  blocks = src.size;
+  stash_map[id] = src;
+
+  if (VerifyBlocks(id, params.buffer, blocks, true) != 0) {
+    // Source blocks have unexpected contents. If we actually need this data later, this is an
+    // unrecoverable error. However, the command that uses the data may have already completed
+    // previously, so the possible failure will occur during source block verification.
+    LOG(ERROR) << "failed to load source blocks for stash " << id;
+    return 0;
+  }
+
+  // In verify mode, we don't need to stash any blocks.
+  if (!params.canwrite) {
+    return 0;
+  }
+
+  LOG(INFO) << "stashing " << blocks << " blocks to " << id;
+  params.stashed += blocks;
+  return WriteStash(params.stashbase, id, blocks, params.buffer, false, nullptr);
 }
 
 static int PerformCommandFree(CommandParameters& params) {
-    // <stash_id>
-    if (params.cpos >= params.tokens.size()) {
-        LOG(ERROR) << "missing stash id in free command";
-        return -1;
-    }
+  // <stash_id>
+  if (params.cpos >= params.tokens.size()) {
+    LOG(ERROR) << "missing stash id in free command";
+    return -1;
+  }
 
-    const std::string& id = params.tokens[params.cpos++];
+  const std::string& id = params.tokens[params.cpos++];
+  stash_map.erase(id);
 
-    stash_map.erase(id);
+  if (params.createdstash || params.canwrite) {
+    return FreeStash(params.stashbase, id);
+  }
 
-    if (params.createdstash || params.canwrite) {
-        return FreeStash(params.stashbase, id);
-    }
-
-    return 0;
+  return 0;
 }
 
 static int PerformCommandZero(CommandParameters& params) {
@@ -1315,100 +1256,95 @@
 }
 
 static int PerformCommandDiff(CommandParameters& params) {
+  // <offset> <length>
+  if (params.cpos + 1 >= params.tokens.size()) {
+    LOG(ERROR) << "missing patch offset or length for " << params.cmdname;
+    return -1;
+  }
 
-    // <offset> <length>
-    if (params.cpos + 1 >= params.tokens.size()) {
-        LOG(ERROR) << "missing patch offset or length for " << params.cmdname;
-        return -1;
-    }
+  size_t offset;
+  if (!android::base::ParseUint(params.tokens[params.cpos++], &offset)) {
+    LOG(ERROR) << "invalid patch offset";
+    return -1;
+  }
 
-    size_t offset;
-    if (!android::base::ParseUint(params.tokens[params.cpos++].c_str(), &offset)) {
-        LOG(ERROR) << "invalid patch offset";
-        return -1;
-    }
+  size_t len;
+  if (!android::base::ParseUint(params.tokens[params.cpos++], &len)) {
+    LOG(ERROR) << "invalid patch len";
+    return -1;
+  }
 
-    size_t len;
-    if (!android::base::ParseUint(params.tokens[params.cpos++].c_str(), &len)) {
-        LOG(ERROR) << "invalid patch len";
-        return -1;
-    }
+  RangeSet tgt;
+  size_t blocks = 0;
+  bool overlap = false;
+  int status = LoadSrcTgtVersion3(params, tgt, &blocks, false, &overlap);
 
-    RangeSet tgt;
-    size_t blocks = 0;
-    bool overlap = false;
-    int status = 0;
-    if (params.version == 1) {
-        status = LoadSrcTgtVersion1(params, tgt, blocks, params.buffer, params.fd);
-    } else if (params.version == 2) {
-        status = LoadSrcTgtVersion2(params, tgt, blocks, params.buffer, params.fd,
-                params.stashbase, nullptr);
-    } else if (params.version >= 3) {
-        status = LoadSrcTgtVersion3(params, tgt, blocks, false, overlap);
-    }
+  if (status == -1) {
+    LOG(ERROR) << "failed to read blocks for diff";
+    return -1;
+  }
 
-    if (status == -1) {
-        LOG(ERROR) << "failed to read blocks for diff";
-        return -1;
-    }
+  if (status == 0) {
+    params.foundwrites = true;
+  } else if (params.foundwrites) {
+    LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]";
+  }
 
+  if (params.canwrite) {
     if (status == 0) {
-        params.foundwrites = true;
-    } else if (params.foundwrites) {
-        LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]";
-    }
+      LOG(INFO) << "patching " << blocks << " blocks to " << tgt.size;
+      Value patch_value(
+          VAL_BLOB, std::string(reinterpret_cast<const char*>(params.patch_start + offset), len));
+      RangeSinkState rss(tgt);
+      rss.fd = params.fd;
+      rss.p_block = 0;
+      rss.p_remain = (tgt.pos[1] - tgt.pos[0]) * BLOCKSIZE;
 
-    if (params.canwrite) {
-        if (status == 0) {
-            LOG(INFO) << "patching " << blocks << " blocks to " << tgt.size;
-            Value patch_value(VAL_BLOB,
-                    std::string(reinterpret_cast<const char*>(params.patch_start + offset), len));
-            RangeSinkState rss(tgt);
-            rss.fd = params.fd;
-            rss.p_block = 0;
-            rss.p_remain = (tgt.pos[1] - tgt.pos[0]) * BLOCKSIZE;
+      off64_t offset = static_cast<off64_t>(tgt.pos[0]) * BLOCKSIZE;
+      if (!discard_blocks(params.fd, offset, rss.p_remain)) {
+        return -1;
+      }
 
-            off64_t offset = static_cast<off64_t>(tgt.pos[0]) * BLOCKSIZE;
-            if (!discard_blocks(params.fd, offset, rss.p_remain)) {
-                return -1;
-            }
+      if (!check_lseek(params.fd, offset, SEEK_SET)) {
+        return -1;
+      }
 
-            if (!check_lseek(params.fd, offset, SEEK_SET)) {
-                return -1;
-            }
-
-            if (params.cmdname[0] == 'i') {      // imgdiff
-                if (ApplyImagePatch(params.buffer.data(), blocks * BLOCKSIZE, &patch_value,
-                        &RangeSinkWrite, &rss, nullptr, nullptr) != 0) {
-                    LOG(ERROR) << "Failed to apply image patch.";
-                    return -1;
-                }
-            } else {
-                if (ApplyBSDiffPatch(params.buffer.data(), blocks * BLOCKSIZE, &patch_value,
-                        0, &RangeSinkWrite, &rss, nullptr) != 0) {
-                    LOG(ERROR) << "Failed to apply bsdiff patch.";
-                    return -1;
-                }
-            }
-
-            // We expect the output of the patcher to fill the tgt ranges exactly.
-            if (rss.p_block != tgt.count || rss.p_remain != 0) {
-                LOG(ERROR) << "range sink underrun?";
-            }
-        } else {
-            LOG(INFO) << "skipping " << blocks << " blocks already patched to " << tgt.size
-                      << " [" << params.cmdline << "]";
+      if (params.cmdname[0] == 'i') {  // imgdiff
+        if (ApplyImagePatch(
+                params.buffer.data(), blocks * BLOCKSIZE, &patch_value,
+                std::bind(&RangeSinkWrite, std::placeholders::_1, std::placeholders::_2, &rss),
+                nullptr, nullptr) != 0) {
+          LOG(ERROR) << "Failed to apply image patch.";
+          return -1;
         }
+      } else {
+        if (ApplyBSDiffPatch(
+                params.buffer.data(), blocks * BLOCKSIZE, &patch_value, 0,
+                std::bind(&RangeSinkWrite, std::placeholders::_1, std::placeholders::_2, &rss),
+                nullptr) != 0) {
+          LOG(ERROR) << "Failed to apply bsdiff patch.";
+          return -1;
+        }
+      }
+
+      // We expect the output of the patcher to fill the tgt ranges exactly.
+      if (rss.p_block != tgt.count || rss.p_remain != 0) {
+        LOG(ERROR) << "range sink underrun?";
+      }
+    } else {
+      LOG(INFO) << "skipping " << blocks << " blocks already patched to " << tgt.size << " ["
+                << params.cmdline << "]";
     }
+  }
 
-    if (!params.freestash.empty()) {
-        FreeStash(params.stashbase, params.freestash);
-        params.freestash.clear();
-    }
+  if (!params.freestash.empty()) {
+    FreeStash(params.stashbase, params.freestash);
+    params.freestash.clear();
+  }
 
-    params.written += tgt.size;
+  params.written += tgt.size;
 
-    return 0;
+  return 0;
 }
 
 static int PerformCommandErase(CommandParameters& params) {
@@ -1471,301 +1407,281 @@
 static Value* PerformBlockImageUpdate(const char* name, State* state,
                                       const std::vector<std::unique_ptr<Expr>>& argv,
                                       const Command* commands, size_t cmdcount, bool dryrun) {
-    CommandParameters params = {};
-    params.canwrite = !dryrun;
+  CommandParameters params = {};
+  params.canwrite = !dryrun;
 
-    LOG(INFO) << "performing " << (dryrun ? "verification" : "update");
-    if (state->is_retry) {
-        is_retry = true;
-        LOG(INFO) << "This update is a retry.";
+  LOG(INFO) << "performing " << (dryrun ? "verification" : "update");
+  if (state->is_retry) {
+    is_retry = true;
+    LOG(INFO) << "This update is a retry.";
+  }
+  if (argv.size() != 4) {
+    ErrorAbort(state, kArgsParsingFailure, "block_image_update expects 4 arguments, got %zu",
+               argv.size());
+    return StringValue("");
+  }
+
+  std::vector<std::unique_ptr<Value>> args;
+  if (!ReadValueArgs(state, argv, &args)) {
+    return nullptr;
+  }
+
+  const Value* blockdev_filename = args[0].get();
+  const Value* transfer_list_value = args[1].get();
+  const Value* new_data_fn = args[2].get();
+  const Value* patch_data_fn = args[3].get();
+
+  if (blockdev_filename->type != VAL_STRING) {
+    ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", name);
+    return StringValue("");
+  }
+  if (transfer_list_value->type != VAL_BLOB) {
+    ErrorAbort(state, kArgsParsingFailure, "transfer_list argument to %s must be blob", name);
+    return StringValue("");
+  }
+  if (new_data_fn->type != VAL_STRING) {
+    ErrorAbort(state, kArgsParsingFailure, "new_data_fn argument to %s must be string", name);
+    return StringValue("");
+  }
+  if (patch_data_fn->type != VAL_STRING) {
+    ErrorAbort(state, kArgsParsingFailure, "patch_data_fn argument to %s must be string", name);
+    return StringValue("");
+  }
+
+  UpdaterInfo* ui = static_cast<UpdaterInfo*>(state->cookie);
+  if (ui == nullptr) {
+    return StringValue("");
+  }
+
+  FILE* cmd_pipe = ui->cmd_pipe;
+  ZipArchiveHandle za = ui->package_zip;
+
+  if (cmd_pipe == nullptr || za == nullptr) {
+    return StringValue("");
+  }
+
+  ZipString path_data(patch_data_fn->data.c_str());
+  ZipEntry patch_entry;
+  if (FindEntry(za, path_data, &patch_entry) != 0) {
+    LOG(ERROR) << name << "(): no file \"" << patch_data_fn->data << "\" in package";
+    return StringValue("");
+  }
+
+  params.patch_start = ui->package_zip_addr + patch_entry.offset;
+  ZipString new_data(new_data_fn->data.c_str());
+  ZipEntry new_entry;
+  if (FindEntry(za, new_data, &new_entry) != 0) {
+    LOG(ERROR) << name << "(): no file \"" << new_data_fn->data << "\" in package";
+    return StringValue("");
+  }
+
+  params.fd.reset(TEMP_FAILURE_RETRY(ota_open(blockdev_filename->data.c_str(), O_RDWR)));
+  if (params.fd == -1) {
+    PLOG(ERROR) << "open \"" << blockdev_filename->data << "\" failed";
+    return StringValue("");
+  }
+
+  if (params.canwrite) {
+    params.nti.za = za;
+    params.nti.entry = new_entry;
+
+    pthread_mutex_init(&params.nti.mu, nullptr);
+    pthread_cond_init(&params.nti.cv, nullptr);
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+    int error = pthread_create(&params.thread, &attr, unzip_new_data, &params.nti);
+    if (error != 0) {
+      PLOG(ERROR) << "pthread_create failed";
+      return StringValue("");
     }
-    if (argv.size() != 4) {
-        ErrorAbort(state, kArgsParsingFailure, "block_image_update expects 4 arguments, got %zu",
-                   argv.size());
-        return StringValue("");
+  }
+
+  std::vector<std::string> lines = android::base::Split(transfer_list_value->data, "\n");
+  if (lines.size() < 2) {
+    ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zd]\n",
+               lines.size());
+    return StringValue("");
+  }
+
+  // First line in transfer list is the version number.
+  if (!android::base::ParseInt(lines[0], &params.version, 3, 4)) {
+    LOG(ERROR) << "unexpected transfer list version [" << lines[0] << "]";
+    return StringValue("");
+  }
+
+  LOG(INFO) << "blockimg version is " << params.version;
+
+  // Second line in transfer list is the total number of blocks we expect to write.
+  size_t total_blocks;
+  if (!android::base::ParseUint(lines[1], &total_blocks)) {
+    ErrorAbort(state, kArgsParsingFailure, "unexpected block count [%s]\n", lines[1].c_str());
+    return StringValue("");
+  }
+
+  if (total_blocks == 0) {
+    return StringValue("t");
+  }
+
+  size_t start = 2;
+  if (lines.size() < 4) {
+    ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zu]\n",
+               lines.size());
+    return StringValue("");
+  }
+
+  // Third line is how many stash entries are needed simultaneously.
+  LOG(INFO) << "maximum stash entries " << lines[2];
+
+  // Fourth line is the maximum number of blocks that will be stashed simultaneously
+  size_t stash_max_blocks;
+  if (!android::base::ParseUint(lines[3], &stash_max_blocks)) {
+    ErrorAbort(state, kArgsParsingFailure, "unexpected maximum stash blocks [%s]\n",
+               lines[3].c_str());
+    return StringValue("");
+  }
+
+  int res = CreateStash(state, stash_max_blocks, blockdev_filename->data, params.stashbase);
+  if (res == -1) {
+    return StringValue("");
+  }
+
+  params.createdstash = res;
+
+  start += 2;
+
+  // Build a map of the available commands
+  std::unordered_map<std::string, const Command*> cmd_map;
+  for (size_t i = 0; i < cmdcount; ++i) {
+    if (cmd_map.find(commands[i].name) != cmd_map.end()) {
+      LOG(ERROR) << "Error: command [" << commands[i].name << "] already exists in the cmd map.";
+      return StringValue("");
+    }
+    cmd_map[commands[i].name] = &commands[i];
+  }
+
+  int rc = -1;
+
+  // Subsequent lines are all individual transfer commands
+  for (auto it = lines.cbegin() + start; it != lines.cend(); it++) {
+    const std::string& line(*it);
+    if (line.empty()) continue;
+
+    params.tokens = android::base::Split(line, " ");
+    params.cpos = 0;
+    params.cmdname = params.tokens[params.cpos++].c_str();
+    params.cmdline = line.c_str();
+
+    if (cmd_map.find(params.cmdname) == cmd_map.end()) {
+      LOG(ERROR) << "unexpected command [" << params.cmdname << "]";
+      goto pbiudone;
     }
 
-    std::vector<std::unique_ptr<Value>> args;
-    if (!ReadValueArgs(state, argv, &args)) {
-        return nullptr;
-    }
+    const Command* cmd = cmd_map[params.cmdname];
 
-    const Value* blockdev_filename = args[0].get();
-    const Value* transfer_list_value = args[1].get();
-    const Value* new_data_fn = args[2].get();
-    const Value* patch_data_fn = args[3].get();
-
-    if (blockdev_filename->type != VAL_STRING) {
-        ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string",
-                   name);
-        return StringValue("");
-    }
-    if (transfer_list_value->type != VAL_BLOB) {
-        ErrorAbort(state, kArgsParsingFailure, "transfer_list argument to %s must be blob", name);
-        return StringValue("");
-    }
-    if (new_data_fn->type != VAL_STRING) {
-        ErrorAbort(state, kArgsParsingFailure, "new_data_fn argument to %s must be string", name);
-        return StringValue("");
-    }
-    if (patch_data_fn->type != VAL_STRING) {
-        ErrorAbort(state, kArgsParsingFailure, "patch_data_fn argument to %s must be string",
-                   name);
-        return StringValue("");
-    }
-
-    UpdaterInfo* ui = static_cast<UpdaterInfo*>(state->cookie);
-    if (ui == nullptr) {
-        return StringValue("");
-    }
-
-    FILE* cmd_pipe = ui->cmd_pipe;
-    ZipArchiveHandle za = ui->package_zip;
-
-    if (cmd_pipe == nullptr || za == nullptr) {
-        return StringValue("");
-    }
-
-    ZipString path_data(patch_data_fn->data.c_str());
-    ZipEntry patch_entry;
-    if (FindEntry(za, path_data, &patch_entry) != 0) {
-        LOG(ERROR) << name << "(): no file \"" << patch_data_fn->data << "\" in package";
-        return StringValue("");
-    }
-
-    params.patch_start = ui->package_zip_addr + patch_entry.offset;
-    ZipString new_data(new_data_fn->data.c_str());
-    ZipEntry new_entry;
-    if (FindEntry(za, new_data, &new_entry) != 0) {
-        LOG(ERROR) << name << "(): no file \"" << new_data_fn->data << "\" in package";
-        return StringValue("");
-    }
-
-    params.fd.reset(TEMP_FAILURE_RETRY(ota_open(blockdev_filename->data.c_str(), O_RDWR)));
-    if (params.fd == -1) {
-        PLOG(ERROR) << "open \"" << blockdev_filename->data << "\" failed";
-        return StringValue("");
+    if (cmd->f != nullptr && cmd->f(params) == -1) {
+      LOG(ERROR) << "failed to execute command [" << line << "]";
+      goto pbiudone;
     }
 
     if (params.canwrite) {
-        params.nti.za = za;
-        params.nti.entry = new_entry;
-
-        pthread_mutex_init(&params.nti.mu, nullptr);
-        pthread_cond_init(&params.nti.cv, nullptr);
-        pthread_attr_t attr;
-        pthread_attr_init(&attr);
-        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
-
-        int error = pthread_create(&params.thread, &attr, unzip_new_data, &params.nti);
-        if (error != 0) {
-            PLOG(ERROR) << "pthread_create failed";
-            return StringValue("");
-        }
-    }
-
-    std::vector<std::string> lines = android::base::Split(transfer_list_value->data, "\n");
-    if (lines.size() < 2) {
-        ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zd]\n",
-                   lines.size());
-        return StringValue("");
-    }
-
-    // First line in transfer list is the version number
-    if (!android::base::ParseInt(lines[0], &params.version, 1, 4)) {
-        LOG(ERROR) << "unexpected transfer list version [" << lines[0] << "]";
-        return StringValue("");
-    }
-
-    LOG(INFO) << "blockimg version is " << params.version;
-
-    // Second line in transfer list is the total number of blocks we expect to write
-    size_t total_blocks;
-    if (!android::base::ParseUint(lines[1], &total_blocks)) {
-        ErrorAbort(state, kArgsParsingFailure, "unexpected block count [%s]\n", lines[1].c_str());
-        return StringValue("");
-    }
-
-    if (total_blocks == 0) {
-        return StringValue("t");
-    }
-
-    size_t start = 2;
-    if (params.version >= 2) {
-        if (lines.size() < 4) {
-          ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zu]\n",
-                     lines.size());
-          return StringValue("");
-        }
-
-        // Third line is how many stash entries are needed simultaneously
-        LOG(INFO) << "maximum stash entries " << lines[2];
-
-        // Fourth line is the maximum number of blocks that will be stashed simultaneously
-        size_t stash_max_blocks;
-        if (!android::base::ParseUint(lines[3], &stash_max_blocks)) {
-            ErrorAbort(state, kArgsParsingFailure, "unexpected maximum stash blocks [%s]\n",
-                       lines[3].c_str());
-            return StringValue("");
-        }
-
-        int res = CreateStash(state, stash_max_blocks, blockdev_filename->data, params.stashbase);
-        if (res == -1) {
-            return StringValue("");
-        }
-
-        params.createdstash = res;
-
-        start += 2;
-    }
-
-    // Build a map of the available commands
-    std::unordered_map<std::string, const Command*> cmd_map;
-    for (size_t i = 0; i < cmdcount; ++i) {
-        if (cmd_map.find(commands[i].name) != cmd_map.end()) {
-            LOG(ERROR) << "Error: command [" << commands[i].name
-                       << "] already exists in the cmd map.";
-            return StringValue(strdup(""));
-        }
-        cmd_map[commands[i].name] = &commands[i];
-    }
-
-    int rc = -1;
-
-    // Subsequent lines are all individual transfer commands
-    for (auto it = lines.cbegin() + start; it != lines.cend(); it++) {
-        const std::string& line(*it);
-        if (line.empty()) continue;
-
-        params.tokens = android::base::Split(line, " ");
-        params.cpos = 0;
-        params.cmdname = params.tokens[params.cpos++].c_str();
-        params.cmdline = line.c_str();
-
-        if (cmd_map.find(params.cmdname) == cmd_map.end()) {
-            LOG(ERROR) << "unexpected command [" << params.cmdname << "]";
-            goto pbiudone;
-        }
-
-        const Command* cmd = cmd_map[params.cmdname];
-
-        if (cmd->f != nullptr && cmd->f(params) == -1) {
-            LOG(ERROR) << "failed to execute command [" << line << "]";
-            goto pbiudone;
-        }
-
-        if (params.canwrite) {
-            if (ota_fsync(params.fd) == -1) {
-                failure_type = kFsyncFailure;
-                PLOG(ERROR) << "fsync failed";
-                goto pbiudone;
-            }
-            fprintf(cmd_pipe, "set_progress %.4f\n",
-                    static_cast<double>(params.written) / total_blocks);
-            fflush(cmd_pipe);
-        }
-    }
-
-    if (params.canwrite) {
-        pthread_join(params.thread, nullptr);
-
-        LOG(INFO) << "wrote " << params.written << " blocks; expected " << total_blocks;
-        LOG(INFO) << "stashed " << params.stashed << " blocks";
-        LOG(INFO) << "max alloc needed was " << params.buffer.size();
-
-        const char* partition = strrchr(blockdev_filename->data.c_str(), '/');
-        if (partition != nullptr && *(partition + 1) != 0) {
-            fprintf(cmd_pipe, "log bytes_written_%s: %zu\n", partition + 1,
-                    params.written * BLOCKSIZE);
-            fprintf(cmd_pipe, "log bytes_stashed_%s: %zu\n", partition + 1,
-                    params.stashed * BLOCKSIZE);
-            fflush(cmd_pipe);
-        }
-        // Delete stash only after successfully completing the update, as it
-        // may contain blocks needed to complete the update later.
-        DeleteStash(params.stashbase);
-    } else {
-        LOG(INFO) << "verified partition contents; update may be resumed";
-    }
-
-    rc = 0;
-
-pbiudone:
-    if (ota_fsync(params.fd) == -1) {
+      if (ota_fsync(params.fd) == -1) {
         failure_type = kFsyncFailure;
         PLOG(ERROR) << "fsync failed";
+        goto pbiudone;
+      }
+      fprintf(cmd_pipe, "set_progress %.4f\n", static_cast<double>(params.written) / total_blocks);
+      fflush(cmd_pipe);
     }
-    // params.fd will be automatically closed because it's a unique_fd.
+  }
 
-    // Only delete the stash if the update cannot be resumed, or it's
-    // a verification run and we created the stash.
-    if (params.isunresumable || (!params.canwrite && params.createdstash)) {
-        DeleteStash(params.stashbase);
+  if (params.canwrite) {
+    pthread_join(params.thread, nullptr);
+
+    LOG(INFO) << "wrote " << params.written << " blocks; expected " << total_blocks;
+    LOG(INFO) << "stashed " << params.stashed << " blocks";
+    LOG(INFO) << "max alloc needed was " << params.buffer.size();
+
+    const char* partition = strrchr(blockdev_filename->data.c_str(), '/');
+    if (partition != nullptr && *(partition + 1) != 0) {
+      fprintf(cmd_pipe, "log bytes_written_%s: %zu\n", partition + 1, params.written * BLOCKSIZE);
+      fprintf(cmd_pipe, "log bytes_stashed_%s: %zu\n", partition + 1, params.stashed * BLOCKSIZE);
+      fflush(cmd_pipe);
     }
+    // Delete stash only after successfully completing the update, as it may contain blocks needed
+    // to complete the update later.
+    DeleteStash(params.stashbase);
+  } else {
+    LOG(INFO) << "verified partition contents; update may be resumed";
+  }
 
-    if (failure_type != kNoCause && state->cause_code == kNoCause) {
-        state->cause_code = failure_type;
-    }
+  rc = 0;
 
-    return StringValue(rc == 0 ? "t" : "");
+pbiudone:
+  if (ota_fsync(params.fd) == -1) {
+    failure_type = kFsyncFailure;
+    PLOG(ERROR) << "fsync failed";
+  }
+  // params.fd will be automatically closed because it's a unique_fd.
+
+  // Only delete the stash if the update cannot be resumed, or it's a verification run and we
+  // created the stash.
+  if (params.isunresumable || (!params.canwrite && params.createdstash)) {
+    DeleteStash(params.stashbase);
+  }
+
+  if (failure_type != kNoCause && state->cause_code == kNoCause) {
+    state->cause_code = failure_type;
+  }
+
+  return StringValue(rc == 0 ? "t" : "");
 }
 
-// The transfer list is a text file containing commands to
-// transfer data from one place to another on the target
-// partition.  We parse it and execute the commands in order:
-//
-//    zero [rangeset]
-//      - fill the indicated blocks with zeros
-//
-//    new [rangeset]
-//      - fill the blocks with data read from the new_data file
-//
-//    erase [rangeset]
-//      - mark the given blocks as empty
-//
-//    move <...>
-//    bsdiff <patchstart> <patchlen> <...>
-//    imgdiff <patchstart> <patchlen> <...>
-//      - read the source blocks, apply a patch (or not in the
-//        case of move), write result to target blocks.  bsdiff or
-//        imgdiff specifies the type of patch; move means no patch
-//        at all.
-//
-//        The format of <...> differs between versions 1 and 2;
-//        see the LoadSrcTgtVersion{1,2}() functions for a
-//        description of what's expected.
-//
-//    stash <stash_id> <src_range>
-//      - (version 2+ only) load the given source range and stash
-//        the data in the given slot of the stash table.
-//
-//    free <stash_id>
-//      - (version 3+ only) free the given stash data.
-//
-// The creator of the transfer list will guarantee that no block
-// is read (ie, used as the source for a patch or move) after it
-// has been written.
-//
-// In version 2, the creator will guarantee that a given stash is
-// loaded (with a stash command) before it's used in a
-// move/bsdiff/imgdiff command.
-//
-// Within one command the source and target ranges may overlap so
-// in general we need to read the entire source into memory before
-// writing anything to the target blocks.
-//
-// All the patch data is concatenated into one patch_data file in
-// the update package.  It must be stored uncompressed because we
-// memory-map it in directly from the archive.  (Since patches are
-// already compressed, we lose very little by not compressing
-// their concatenation.)
-//
-// In version 3, commands that read data from the partition (i.e.
-// move/bsdiff/imgdiff/stash) have one or more additional hashes
-// before the range parameters, which are used to check if the
-// command has already been completed and verify the integrity of
-// the source data.
-
+/**
+ * The transfer list is a text file containing commands to transfer data from one place to another
+ * on the target partition. We parse it and execute the commands in order:
+ *
+ *    zero [rangeset]
+ *      - Fill the indicated blocks with zeros.
+ *
+ *    new [rangeset]
+ *      - Fill the blocks with data read from the new_data file.
+ *
+ *    erase [rangeset]
+ *      - Mark the given blocks as empty.
+ *
+ *    move <...>
+ *    bsdiff <patchstart> <patchlen> <...>
+ *    imgdiff <patchstart> <patchlen> <...>
+ *      - Read the source blocks, apply a patch (or not in the case of move), write result to target
+ *        blocks. bsdiff or imgdiff specifies the type of patch; move means no patch at all.
+ *
+ *        See the comments in LoadSrcTgtVersion3() for a description of the <...> format.
+ *
+ *    stash <stash_id> <src_range>
+ *      - Load the given source range and stash the data in the given slot of the stash table.
+ *
+ *    free <stash_id>
+ *      - Free the given stash data.
+ *
+ * The creator of the transfer list will guarantee that no block is read (i.e., used as the source for
+ * a patch or move) after it has been written.
+ *
+ * The creator will guarantee that a given stash is loaded (with a stash command) before it's used
+ * in a move/bsdiff/imgdiff command.
+ *
+ * Within one command the source and target ranges may overlap so in general we need to read the
+ * entire source into memory before writing anything to the target blocks.
+ *
+ * All the patch data is concatenated into one patch_data file in the update package. It must be
+ * stored uncompressed because we memory-map it in directly from the archive. (Since patches are
+ * already compressed, we lose very little by not compressing their concatenation.)
+ *
+ * Commands that read data from the partition (i.e. move/bsdiff/imgdiff/stash) have one or more
+ * additional hashes before the range parameters, which are used to check if the command has already
+ * been completed and verify the integrity of the source data.
+ */
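
// Illustrative sketch only (not part of this patch): a tiny, hand-written transfer list in the
// shape documented above. Every value is invented (the stash id would really be a 40-character
// SHA-1 hex digest, and real lists are produced by the OTA generation tools); this just shows the
// header lines and a few commands that PerformBlockImageUpdate() would iterate over.
#include <iostream>
#include <string>

int main() {
  const std::string transfer_list =
      "4\n"                                          // version (this patch accepts 3 or 4)
      "8\n"                                          // total blocks written (2 move + 4 zero + 2 new)
      "1\n"                                          // maximum simultaneous stash entries
      "2\n"                                          // maximum simultaneously stashed blocks
      "stash 9eae6cbd 2,20,22\n"                     // save source blocks [20,22) under id 9eae6cbd
      "move 9eae6cbd 2,40,42 2 - 9eae6cbd:2,0,2\n"   // rebuild those 2 blocks from the stash into [40,42)
      "free 9eae6cbd\n"                              // stash no longer needed
      "zero 2,0,4\n"                                 // fill blocks [0,4) with zeros
      "new 2,4,6\n";                                 // fill blocks [4,6) from the new_data stream
  std::cout << transfer_list;
  return 0;
}
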
 Value* BlockImageVerifyFn(const char* name, State* state,
                           const std::vector<std::unique_ptr<Expr>>& argv) {
     // Commands which are not tested are set to nullptr to skip them completely
@@ -1952,7 +1868,7 @@
     LOG(INFO) << filename->data << " image corrupted, attempting to recover...";
 
     // When opened with O_RDWR, libfec rewrites corrupted blocks when they are read
-    fec::io fh(filename->data.c_str(), O_RDWR);
+    fec::io fh(filename->data, O_RDWR);
 
     if (!fh) {
         ErrorAbort(state, kLibfecFailure, "fec_open \"%s\" failed: %s", filename->data.c_str(),
diff --git a/verifier.cpp b/verifier.cpp
index e9d540c..23142c1 100644
--- a/verifier.cpp
+++ b/verifier.cpp
@@ -370,7 +370,7 @@
 }
 
 struct BNDeleter {
-  void operator()(BIGNUM* bn) {
+  void operator()(BIGNUM* bn) const {
     BN_free(bn);
   }
 };
diff --git a/verifier.h b/verifier.h
index 6bee749..6fa8f2b 100644
--- a/verifier.h
+++ b/verifier.h
@@ -26,13 +26,13 @@
 #include <openssl/sha.h>
 
 struct RSADeleter {
-  void operator()(RSA* rsa) {
+  void operator()(RSA* rsa) const {
     RSA_free(rsa);
   }
 };
 
 struct ECKEYDeleter {
-  void operator()(EC_KEY* ec_key) {
+  void operator()(EC_KEY* ec_key) const {
     EC_KEY_free(ec_key);
   }
 };
diff --git a/wear_touch.cpp b/wear_touch.cpp
index cf33daa..e2ab44d 100644
--- a/wear_touch.cpp
+++ b/wear_touch.cpp
@@ -118,7 +118,7 @@
 }
 
 void* WearSwipeDetector::touch_thread(void* cookie) {
-    ((WearSwipeDetector*)cookie)->run();
+    (static_cast<WearSwipeDetector*>(cookie))->run();
     return NULL;
 }