Merge "tests: Add the missing dependency on libhidlbase." into oc-mr1-dev
am: 1f4229ff95
Change-Id: I80603def879d1837bdcd6ec48aeeef22b635fa00
diff --git a/applypatch/Android.mk b/applypatch/Android.mk
index a7412d2..7aed0a9 100644
--- a/applypatch/Android.mk
+++ b/applypatch/Android.mk
@@ -127,7 +127,8 @@
# libbsdiff is compiled with -D_FILE_OFFSET_BITS=64.
libimgdiff_cflags := \
-Werror \
- -D_FILE_OFFSET_BITS=64
+ -D_FILE_OFFSET_BITS=64 \
+ -DZLIB_CONST
libimgdiff_static_libraries := \
libbsdiff \
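
Note: -DZLIB_CONST makes zlib declare z_stream::next_in with a const pointer type, so imgdiff can hand read-only buffers to inflate() without const_casts. A minimal sketch of what the flag changes for callers (illustrative only, not part of this change; assumes zlib >= 1.2.5.2):

    #define ZLIB_CONST
    #include <zlib.h>

    #include <stdint.h>
    #include <vector>

    // Decompress a raw deflate stream just to measure its inflated size. Under
    // ZLIB_CONST, next_in is "const Bytef*", so the const input can be assigned
    // directly (without the flag this assignment would need a const_cast).
    size_t InflatedSize(const std::vector<uint8_t>& compressed) {
      z_stream strm = {};
      strm.next_in = compressed.data();
      strm.avail_in = static_cast<uInt>(compressed.size());
      if (inflateInit2(&strm, -15) != Z_OK) {  // -15: raw deflate stream, as imgdiff uses
        return 0;
      }
      uint8_t out[4096];
      size_t total = 0;
      int ret;
      do {
        strm.next_out = out;
        strm.avail_out = sizeof(out);
        ret = inflate(&strm, Z_NO_FLUSH);
        total += sizeof(out) - strm.avail_out;
      } while (ret == Z_OK);
      inflateEnd(&strm);
      return total;
    }
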
diff --git a/applypatch/imgdiff.cpp b/applypatch/imgdiff.cpp
index fc24064..8802652 100644
--- a/applypatch/imgdiff.cpp
+++ b/applypatch/imgdiff.cpp
@@ -196,7 +196,8 @@
size_t DataLengthForPatch() const;
void Dump() const {
- printf("type %d start %zu len %zu\n", type_, start_, DataLengthForPatch());
+ printf("type: %d, start: %zu, len: %zu, name: %s\n", type_, start_, DataLengthForPatch(),
+ entry_name_.c_str());
}
void SetSourceInfo(const ImageChunk& other);
@@ -211,7 +212,7 @@
size_t GetHeaderSize(size_t patch_size) const;
// Return the offset of the next patch into the patch data.
- size_t WriteHeaderToFd(int fd, const std::vector<uint8_t>& patch, size_t offset);
+ size_t WriteHeaderToFd(int fd, const std::vector<uint8_t>& patch, size_t offset) const;
/*
* Cause a gzip chunk to be treated as a normal chunk (ie, as a blob
@@ -233,6 +234,14 @@
bool IsAdjacentNormal(const ImageChunk& other) const;
void MergeAdjacentNormal(const ImageChunk& other);
+ /*
+ * Compute a bsdiff patch between |this| and the input source chunk |src|.
+ * Store the result in |patch_data|.
+ * |bsdiff_cache| can be used to cache the suffix array if the same |src| chunk is used
+ * repeatedly; pass nullptr if not needed.
+ */
+ bool MakePatch(const ImageChunk& src, std::vector<uint8_t>* patch_data, saidx_t** bsdiff_cache);
+
private:
int type_; // CHUNK_NORMAL, CHUNK_DEFLATE, CHUNK_RAW
size_t start_; // offset of chunk in the original input file
@@ -322,7 +331,7 @@
void ImageChunk::ChangeDeflateChunkToNormal() {
if (type_ != CHUNK_DEFLATE) return;
type_ = CHUNK_NORMAL;
- entry_name_.clear();
+ // No need to clear the entry name.
uncompressed_data_.clear();
}
@@ -345,7 +354,7 @@
}
}
-size_t ImageChunk::WriteHeaderToFd(int fd, const std::vector<uint8_t>& patch, size_t offset) {
+size_t ImageChunk::WriteHeaderToFd(int fd, const std::vector<uint8_t>& patch, size_t offset) const {
Write4(fd, type_);
switch (type_) {
case CHUNK_NORMAL:
@@ -393,6 +402,68 @@
raw_data_len_ = raw_data_len_ + other.raw_data_len_;
}
+bool ImageChunk::MakePatch(const ImageChunk& src, std::vector<uint8_t>* patch_data,
+ saidx_t** bsdiff_cache) {
+ if (ChangeChunkToRaw(0)) {
+ size_t patch_size = DataLengthForPatch();
+ patch_data->resize(patch_size);
+ std::copy(DataForPatch(), DataForPatch() + patch_size, patch_data->begin());
+ return true;
+ }
+
+#if defined(__ANDROID__)
+ char ptemp[] = "/data/local/tmp/imgdiff-patch-XXXXXX";
+#else
+ char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
+#endif
+
+ int fd = mkstemp(ptemp);
+ if (fd == -1) {
+ printf("MakePatch failed to create a temporary file: %s\n", strerror(errno));
+ return false;
+ }
+ close(fd);
+
+ int r = bsdiff::bsdiff(src.DataForPatch(), src.DataLengthForPatch(), DataForPatch(),
+ DataLengthForPatch(), ptemp, bsdiff_cache);
+ if (r != 0) {
+ printf("bsdiff() failed: %d\n", r);
+ return false;
+ }
+
+ android::base::unique_fd patch_fd(open(ptemp, O_RDONLY));
+ if (patch_fd == -1) {
+ printf("failed to open %s: %s\n", ptemp, strerror(errno));
+ return false;
+ }
+ struct stat st;
+ if (fstat(patch_fd, &st) != 0) {
+ printf("failed to stat patch file %s: %s\n", ptemp, strerror(errno));
+ return false;
+ }
+
+ size_t sz = static_cast<size_t>(st.st_size);
+ // Change the chunk type to raw if the patch takes less space that way.
+ if (ChangeChunkToRaw(sz)) {
+ unlink(ptemp);
+ size_t patch_size = DataLengthForPatch();
+ patch_data->resize(patch_size);
+ std::copy(DataForPatch(), DataForPatch() + patch_size, patch_data->begin());
+ return true;
+ }
+ patch_data->resize(sz);
+ if (!android::base::ReadFully(patch_fd, patch_data->data(), sz)) {
+ printf("failed to read \"%s\" %s\n", ptemp, strerror(errno));
+ unlink(ptemp);
+ return false;
+ }
+
+ unlink(ptemp);
+ SetSourceInfo(src);
+
+ return true;
+}
+
bool ImageChunk::ReconstructDeflateChunk() {
if (type_ != CHUNK_DEFLATE) {
printf("attempt to reconstruct non-deflate chunk\n");
@@ -458,29 +529,347 @@
return true;
}
+// Interface for zip_mode and image_mode images. We initialize the image from an input file and
+// split the file content into a list of image chunks.
+class Image {
+ public:
+ explicit Image(bool is_source) : is_source_(is_source) {}
+
+ virtual ~Image() {}
+
+ // Create a list of image chunks from input file.
+ virtual bool Initialize(const std::string& filename) = 0;
+
+ // Look for runs of adjacent normal chunks and compress them down into a single chunk. (Such
+ // runs can be produced when deflate chunks are changed to normal chunks.)
+ void MergeAdjacentNormalChunks();
+
+ // In zip mode, find the matching deflate source chunk by entry name. Also search for normal
+ // chunks if |find_normal| is true.
+ ImageChunk* FindChunkByName(const std::string& name, bool find_normal = false);
+
+ // Write the contents of |patch_data| to |patch_fd|.
+ bool WritePatchDataToFd(const std::vector<std::vector<uint8_t>>& patch_data, int patch_fd) const;
+
+ void DumpChunks() const;
+
+ // Non-const iterators to access the stored ImageChunks.
+ std::vector<ImageChunk>::iterator begin() {
+ return chunks_.begin();
+ }
+
+ std::vector<ImageChunk>::iterator end() {
+ return chunks_.end();
+ }
+ // Return a pointer to the ith ImageChunk.
+ ImageChunk* Get(size_t i) {
+ CHECK_LT(i, chunks_.size());
+ return &chunks_[i];
+ }
+
+ size_t NumOfChunks() const {
+ return chunks_.size();
+ }
+
+ protected:
+ bool ReadFile(const std::string& filename, std::vector<uint8_t>* file_content);
+
+ bool is_source_; // True if it's for source chunks.
+ std::vector<ImageChunk> chunks_; // Internal storage of ImageChunk.
+ std::vector<uint8_t> file_content_; // Store the whole input file in memory.
+};
+
+void Image::MergeAdjacentNormalChunks() {
+ size_t merged_last = 0, cur = 0;
+ while (cur < chunks_.size()) {
+ // Look for normal chunks adjacent to the current one. If such a chunk exists, extend the
+ // length of the current normal chunk.
+ size_t to_check = cur + 1;
+ while (to_check < chunks_.size() && chunks_[cur].IsAdjacentNormal(chunks_[to_check])) {
+ chunks_[cur].MergeAdjacentNormal(chunks_[to_check]);
+ to_check++;
+ }
+
+ if (merged_last != cur) {
+ chunks_[merged_last] = std::move(chunks_[cur]);
+ }
+ merged_last++;
+ cur = to_check;
+ }
+ if (merged_last < chunks_.size()) {
+ chunks_.erase(chunks_.begin() + merged_last, chunks_.end());
+ }
+}
+
+ImageChunk* Image::FindChunkByName(const std::string& name, bool find_normal) {
+ if (name.empty()) {
+ return nullptr;
+ }
+ for (auto& chunk : chunks_) {
+ if ((chunk.GetType() == CHUNK_DEFLATE || find_normal) && chunk.GetEntryName() == name) {
+ return &chunk;
+ }
+ }
+ return nullptr;
+}
+
+bool Image::WritePatchDataToFd(const std::vector<std::vector<uint8_t>>& patch_data,
+ int patch_fd) const {
+ // Figure out how big the imgdiff file header is going to be, so that we can correctly compute
+ // the offset of each bsdiff patch within the file.
+ CHECK_EQ(chunks_.size(), patch_data.size());
+ size_t total_header_size = 12;
+ for (size_t i = 0; i < chunks_.size(); ++i) {
+ total_header_size += chunks_[i].GetHeaderSize(patch_data[i].size());
+ }
+
+ size_t offset = total_header_size;
+
+ // Write out the headers.
+ if (!android::base::WriteStringToFd("IMGDIFF2", patch_fd)) {
+ printf("failed to write \"IMGDIFF2\": %s\n", strerror(errno));
+ return false;
+ }
+ Write4(patch_fd, static_cast<int32_t>(chunks_.size()));
+ for (size_t i = 0; i < chunks_.size(); ++i) {
+ printf("chunk %zu: ", i);
+ offset = chunks_[i].WriteHeaderToFd(patch_fd, patch_data[i], offset);
+ }
+
+ // Append each chunk's bsdiff patch, in order.
+ for (size_t i = 0; i < chunks_.size(); ++i) {
+ if (chunks_[i].GetType() != CHUNK_RAW) {
+ if (!android::base::WriteFully(patch_fd, patch_data[i].data(), patch_data[i].size())) {
+ printf("failed to write %zu bytes patch for chunk %zu\n", patch_data[i].size(), i);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+void Image::DumpChunks() const {
+ std::string type = is_source_ ? "source" : "target";
+ printf("Dumping chunks for %s\n", type.c_str());
+ for (size_t i = 0; i < chunks_.size(); ++i) {
+ printf("chunk %zu: ", i);
+ chunks_[i].Dump();
+ }
+}
+
+bool Image::ReadFile(const std::string& filename, std::vector<uint8_t>* file_content) {
+ CHECK(file_content != nullptr);
+
+ android::base::unique_fd fd(open(filename.c_str(), O_RDONLY));
+ if (fd == -1) {
+ printf("failed to open \"%s\" %s\n", filename.c_str(), strerror(errno));
+ return false;
+ }
+ struct stat st;
+ if (fstat(fd, &st) != 0) {
+ printf("failed to stat \"%s\": %s\n", filename.c_str(), strerror(errno));
+ return false;
+ }
+
+ size_t sz = static_cast<size_t>(st.st_size);
+ file_content->resize(sz);
+ if (!android::base::ReadFully(fd, file_content->data(), sz)) {
+ printf("failed to read \"%s\" %s\n", filename.c_str(), strerror(errno));
+ return false;
+ }
+ fd.reset();
+
+ return true;
+}
+
+class ZipModeImage : public Image {
+ public:
+ explicit ZipModeImage(bool is_source) : Image(is_source) {}
+
+ bool Initialize(const std::string& filename) override;
+
+ const ImageChunk& PseudoSource() const {
+ CHECK(is_source_);
+ CHECK(pseudo_source_ != nullptr);
+ return *pseudo_source_;
+ }
+
+ // Verify that we can reconstruct the deflate chunks; also change the type to CHUNK_NORMAL if
+ // src and tgt are identical.
+ static bool CheckAndProcessChunks(ZipModeImage* tgt_image, ZipModeImage* src_image);
+
+ // Compute the patches against the input image, and write the data into |patch_name|.
+ static bool GeneratePatches(ZipModeImage* tgt_image, ZipModeImage* src_image,
+ const std::string& patch_name);
+
+ private:
+ // Initialize image chunks based on the zip entries.
+ bool InitializeChunks(const std::string& filename, ZipArchiveHandle handle);
+ // Add a zip entry to the list.
+ bool AddZipEntryToChunks(ZipArchiveHandle handle, const std::string& entry_name, ZipEntry* entry);
+ // Return the real size of the zip file (omitting the trailing zeros used for alignment).
+ bool GetZipFileSize(size_t* input_file_size);
+
+ // The pseudo source chunk for bsdiff if there's no match for the given target chunk. It's in
+ // fact the whole source file.
+ std::unique_ptr<ImageChunk> pseudo_source_;
+};
+
+bool ZipModeImage::Initialize(const std::string& filename) {
+ if (!ReadFile(filename, &file_content_)) {
+ return false;
+ }
+
+ // Omit the trailing zeros before we pass the file to the ziparchive handler.
+ size_t zipfile_size;
+ if (!GetZipFileSize(&zipfile_size)) {
+ printf("failed to parse the actual size of %s\n", filename.c_str());
+ return false;
+ }
+ ZipArchiveHandle handle;
+ int err = OpenArchiveFromMemory(const_cast<uint8_t*>(file_content_.data()), zipfile_size,
+ filename.c_str(), &handle);
+ if (err != 0) {
+ printf("failed to open zip file %s: %s\n", filename.c_str(), ErrorCodeString(err));
+ CloseArchive(handle);
+ return false;
+ }
+
+ if (is_source_) {
+ pseudo_source_ = std::make_unique<ImageChunk>(CHUNK_NORMAL, 0, &file_content_, zipfile_size);
+ }
+ if (!InitializeChunks(filename, handle)) {
+ CloseArchive(handle);
+ return false;
+ }
+
+ CloseArchive(handle);
+ return true;
+}
+
+// Iterate the zip entries and compose the image chunks accordingly.
+bool ZipModeImage::InitializeChunks(const std::string& filename, ZipArchiveHandle handle) {
+ void* cookie;
+ int ret = StartIteration(handle, &cookie, nullptr, nullptr);
+ if (ret != 0) {
+ printf("failed to iterate over entries in %s: %s\n", filename.c_str(), ErrorCodeString(ret));
+ return false;
+ }
+
+ // Create a list of deflated zip entries, sorted by offset.
+ std::vector<std::pair<std::string, ZipEntry>> temp_entries;
+ ZipString name;
+ ZipEntry entry;
+ while ((ret = Next(cookie, &entry, &name)) == 0) {
+ if (entry.method == kCompressDeflated) {
+ std::string entry_name(name.name, name.name + name.name_length);
+ temp_entries.emplace_back(entry_name, entry);
+ }
+ }
+
+ if (ret != -1) {
+ printf("Error while iterating over zip entries: %s\n", ErrorCodeString(ret));
+ return false;
+ }
+ std::sort(temp_entries.begin(), temp_entries.end(),
+ [](auto& entry1, auto& entry2) { return entry1.second.offset < entry2.second.offset; });
+
+ EndIteration(cookie);
+
+ // For source chunks, we don't need to compose chunks for the metadata.
+ if (is_source_) {
+ for (auto& entry : temp_entries) {
+ if (!AddZipEntryToChunks(handle, entry.first, &entry.second)) {
+ printf("Failed to add %s to source chunks\n", entry.first.c_str());
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // For target chunks, add the deflate entries as CHUNK_DEFLATE and the contents between two
+ // deflate entries as CHUNK_NORMAL.
+ size_t pos = 0;
+ size_t nextentry = 0;
+ while (pos < file_content_.size()) {
+ if (nextentry < temp_entries.size() &&
+ static_cast<off64_t>(pos) == temp_entries[nextentry].second.offset) {
+ // Add the next zip entry.
+ std::string entry_name = temp_entries[nextentry].first;
+ if (!AddZipEntryToChunks(handle, entry_name, &temp_entries[nextentry].second)) {
+ printf("Failed to add %s to target chunks\n", entry_name.c_str());
+ return false;
+ }
+
+ pos += temp_entries[nextentry].second.compressed_length;
+ ++nextentry;
+ continue;
+ }
+
+ // Use a normal chunk to take all the data up to the start of the next entry.
+ size_t raw_data_len;
+ if (nextentry < temp_entries.size()) {
+ raw_data_len = temp_entries[nextentry].second.offset - pos;
+ } else {
+ raw_data_len = file_content_.size() - pos;
+ }
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, raw_data_len);
+
+ pos += raw_data_len;
+ }
+
+ return true;
+}
+
+bool ZipModeImage::AddZipEntryToChunks(ZipArchiveHandle handle, const std::string& entry_name,
+ ZipEntry* entry) {
+ size_t compressed_len = entry->compressed_length;
+ if (entry->method == kCompressDeflated) {
+ size_t uncompressed_len = entry->uncompressed_length;
+ std::vector<uint8_t> uncompressed_data(uncompressed_len);
+ int ret = ExtractToMemory(handle, entry, uncompressed_data.data(), uncompressed_len);
+ if (ret != 0) {
+ printf("failed to extract %s with size %zu: %s\n", entry_name.c_str(), uncompressed_len,
+ ErrorCodeString(ret));
+ return false;
+ }
+ ImageChunk curr(CHUNK_DEFLATE, entry->offset, &file_content_, compressed_len);
+ curr.SetEntryName(entry_name);
+ curr.SetUncompressedData(std::move(uncompressed_data));
+ chunks_.push_back(curr);
+ } else {
+ ImageChunk curr(CHUNK_NORMAL, entry->offset, &file_content_, compressed_len);
+ curr.SetEntryName(entry_name);
+ chunks_.push_back(curr);
+ }
+
+ return true;
+}
+
// EOCD record
// offset 0: signature 0x06054b50, 4 bytes
// offset 4: number of this disk, 2 bytes
// ...
// offset 20: comment length, 2 bytes
// offset 22: comment, n bytes
-static bool GetZipFileSize(const std::vector<uint8_t>& zip_file, size_t* input_file_size) {
- if (zip_file.size() < 22) {
+bool ZipModeImage::GetZipFileSize(size_t* input_file_size) {
+ if (file_content_.size() < 22) {
printf("file is too small to be a zip file\n");
return false;
}
// Look for End of central directory record of the zip file, and calculate the actual
// zip_file size.
- for (int i = zip_file.size() - 22; i >= 0; i--) {
- if (zip_file[i] == 0x50) {
- if (get_unaligned<uint32_t>(&zip_file[i]) == 0x06054b50) {
+ for (int i = file_content_.size() - 22; i >= 0; i--) {
+ if (file_content_[i] == 0x50) {
+ if (get_unaligned<uint32_t>(&file_content_[i]) == 0x06054b50) {
// double-check: this archive consists of a single "disk".
- CHECK_EQ(get_unaligned<uint16_t>(&zip_file[i + 4]), 0);
+ CHECK_EQ(get_unaligned<uint16_t>(&file_content_[i + 4]), 0);
- uint16_t comment_length = get_unaligned<uint16_t>(&zip_file[i + 20]);
+ uint16_t comment_length = get_unaligned<uint16_t>(&file_content_[i + 20]);
size_t file_size = i + 22 + comment_length;
- CHECK_LE(file_size, zip_file.size());
+ CHECK_LE(file_size, file_content_.size());
*input_file_size = file_size;
return true;
}
@@ -491,162 +880,116 @@
return false;
}
-static bool ReadZip(const char* filename, std::vector<ImageChunk>* chunks,
- std::vector<uint8_t>* zip_file, bool include_pseudo_chunk) {
- CHECK(chunks != nullptr && zip_file != nullptr);
-
- android::base::unique_fd fd(open(filename, O_RDONLY));
- if (fd == -1) {
- printf("failed to open \"%s\" %s\n", filename, strerror(errno));
- return false;
- }
- struct stat st;
- if (fstat(fd, &st) != 0) {
- printf("failed to stat \"%s\": %s\n", filename, strerror(errno));
- return false;
- }
-
- size_t sz = static_cast<size_t>(st.st_size);
- zip_file->resize(sz);
- if (!android::base::ReadFully(fd, zip_file->data(), sz)) {
- printf("failed to read \"%s\" %s\n", filename, strerror(errno));
- return false;
- }
- fd.reset();
-
- // Trim the trailing zeros before we pass the file to ziparchive handler.
- size_t zipfile_size;
- if (!GetZipFileSize(*zip_file, &zipfile_size)) {
- printf("failed to parse the actual size of %s\n", filename);
- return false;
- }
- ZipArchiveHandle handle;
- int err = OpenArchiveFromMemory(zip_file->data(), zipfile_size, filename, &handle);
- if (err != 0) {
- printf("failed to open zip file %s: %s\n", filename, ErrorCodeString(err));
- CloseArchive(handle);
- return false;
- }
-
- // Create a list of deflated zip entries, sorted by offset.
- std::vector<std::pair<std::string, ZipEntry>> temp_entries;
- void* cookie;
- int ret = StartIteration(handle, &cookie, nullptr, nullptr);
- if (ret != 0) {
- printf("failed to iterate over entries in %s: %s\n", filename, ErrorCodeString(ret));
- CloseArchive(handle);
- return false;
- }
-
- ZipString name;
- ZipEntry entry;
- while ((ret = Next(cookie, &entry, &name)) == 0) {
- if (entry.method == kCompressDeflated) {
- std::string entryname(name.name, name.name + name.name_length);
- temp_entries.push_back(std::make_pair(entryname, entry));
- }
- }
-
- if (ret != -1) {
- printf("Error while iterating over zip entries: %s\n", ErrorCodeString(ret));
- CloseArchive(handle);
- return false;
- }
- std::sort(temp_entries.begin(), temp_entries.end(),
- [](auto& entry1, auto& entry2) {
- return entry1.second.offset < entry2.second.offset;
- });
-
- EndIteration(cookie);
-
- if (include_pseudo_chunk) {
- chunks->emplace_back(CHUNK_NORMAL, 0, zip_file, zip_file->size());
- }
-
- size_t pos = 0;
- size_t nextentry = 0;
- while (pos < zip_file->size()) {
- if (nextentry < temp_entries.size() &&
- static_cast<off64_t>(pos) == temp_entries[nextentry].second.offset) {
- // compose the next deflate chunk.
- std::string entryname = temp_entries[nextentry].first;
- size_t uncompressed_len = temp_entries[nextentry].second.uncompressed_length;
- std::vector<uint8_t> uncompressed_data(uncompressed_len);
- if ((ret = ExtractToMemory(handle, &temp_entries[nextentry].second, uncompressed_data.data(),
- uncompressed_len)) != 0) {
- printf("failed to extract %s with size %zu: %s\n", entryname.c_str(), uncompressed_len,
- ErrorCodeString(ret));
- CloseArchive(handle);
- return false;
- }
-
- size_t compressed_len = temp_entries[nextentry].second.compressed_length;
- ImageChunk curr(CHUNK_DEFLATE, pos, zip_file, compressed_len);
- curr.SetEntryName(std::move(entryname));
- curr.SetUncompressedData(std::move(uncompressed_data));
- chunks->push_back(curr);
-
- pos += compressed_len;
- ++nextentry;
+bool ZipModeImage::CheckAndProcessChunks(ZipModeImage* tgt_image, ZipModeImage* src_image) {
+ for (auto& tgt_chunk : *tgt_image) {
+ if (tgt_chunk.GetType() != CHUNK_DEFLATE) {
continue;
}
- // Use a normal chunk to take all the data up to the start of the next deflate section.
- size_t raw_data_len;
- if (nextentry < temp_entries.size()) {
- raw_data_len = temp_entries[nextentry].second.offset - pos;
- } else {
- raw_data_len = zip_file->size() - pos;
- }
- chunks->emplace_back(CHUNK_NORMAL, pos, zip_file, raw_data_len);
+ ImageChunk* src_chunk = src_image->FindChunkByName(tgt_chunk.GetEntryName());
+ if (src_chunk == nullptr) {
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ } else if (tgt_chunk == *src_chunk) {
+ // If two deflate chunks are identical (eg, the kernel has not changed between two builds),
+ // treat them as normal chunks. This makes applypatch much faster -- it can apply a trivial
+ // patch to the compressed data, rather than uncompressing and recompressing to apply the
+ // trivial patch to the uncompressed data.
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ src_chunk->ChangeDeflateChunkToNormal();
+ } else if (!tgt_chunk.ReconstructDeflateChunk()) {
+ // We cannot recompress the data and get exactly the same bits as are in the input target
+ // image. Treat the chunk as a normal non-deflated chunk.
+ printf("failed to reconstruct target deflate chunk [%s]; treating as normal\n",
+ tgt_chunk.GetEntryName().c_str());
- pos += raw_data_len;
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ src_chunk->ChangeDeflateChunkToNormal();
+ }
}
- CloseArchive(handle);
return true;
}
-// Read the given file and break it up into chunks, and putting the data in to a vector.
-static bool ReadImage(const char* filename, std::vector<ImageChunk>* chunks,
- std::vector<uint8_t>* img) {
- CHECK(chunks != nullptr && img != nullptr);
+bool ZipModeImage::GeneratePatches(ZipModeImage* tgt_image, ZipModeImage* src_image,
+ const std::string& patch_name) {
+ // For zips, we only need to merge normal chunks for the target: deflated chunks are matched via
+ // filename, and normal chunks are patched using the entire source file as the source.
+ tgt_image->MergeAdjacentNormalChunks();
+ tgt_image->DumpChunks();
- android::base::unique_fd fd(open(filename, O_RDONLY));
- if (fd == -1) {
- printf("failed to open \"%s\" %s\n", filename, strerror(errno));
- return false;
+ printf("Construct patches for %zu chunks...\n", tgt_image->NumOfChunks());
+ std::vector<std::vector<uint8_t>> patch_data(tgt_image->NumOfChunks());
+
+ saidx_t* bsdiff_cache = nullptr;
+ size_t i = 0;
+ for (auto& tgt_chunk : *tgt_image) {
+ ImageChunk* src_chunk = (tgt_chunk.GetType() != CHUNK_DEFLATE)
+ ? nullptr
+ : src_image->FindChunkByName(tgt_chunk.GetEntryName());
+
+ const auto& src_ref = (src_chunk == nullptr) ? src_image->PseudoSource() : *src_chunk;
+ saidx_t** bsdiff_cache_ptr = (src_chunk == nullptr) ? &bsdiff_cache : nullptr;
+
+ if (!tgt_chunk.MakePatch(src_ref, &patch_data[i], bsdiff_cache_ptr)) {
+ printf("Failed to generate patch, name: %s\n", tgt_chunk.GetEntryName().c_str());
+ return false;
+ }
+
+ printf("patch %3zu is %zu bytes (of %zu)\n", i, patch_data[i].size(),
+ tgt_chunk.GetRawDataLength());
+ i++;
}
- struct stat st;
- if (fstat(fd, &st) != 0) {
- printf("failed to stat \"%s\": %s\n", filename, strerror(errno));
+ free(bsdiff_cache);
+
+ android::base::unique_fd patch_fd(
+ open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
+ if (patch_fd == -1) {
+ printf("failed to open \"%s\": %s\n", patch_name.c_str(), strerror(errno));
return false;
}
- size_t sz = static_cast<size_t>(st.st_size);
- img->resize(sz);
- if (!android::base::ReadFully(fd, img->data(), sz)) {
- printf("failed to read \"%s\" %s\n", filename, strerror(errno));
+ return tgt_image->WritePatchDataToFd(patch_data, patch_fd);
+}
+
+class ImageModeImage : public Image {
+ public:
+ explicit ImageModeImage(bool is_source) : Image(is_source) {}
+
+ // Initialize the image chunks list by searching for gzip magic numbers in an image file.
+ bool Initialize(const std::string& filename) override;
+
+ // In Image Mode, verify that the source and target images have the same chunk structure (ie, the
+ // same sequence of deflate and normal chunks).
+ static bool CheckAndProcessChunks(ImageModeImage* tgt_image, ImageModeImage* src_image);
+
+ // In image mode, generate patches against the given source chunks and bonus_data; write the
+ // result to |patch_name|.
+ static bool GeneratePatches(ImageModeImage* tgt_image, ImageModeImage* src_image,
+ const std::vector<uint8_t>& bonus_data, const std::string& patch_name);
+};
+
+bool ImageModeImage::Initialize(const std::string& filename) {
+ if (!ReadFile(filename, &file_content_)) {
return false;
}
+ size_t sz = file_content_.size();
size_t pos = 0;
-
while (pos < sz) {
// 0x00 no header flags, 0x08 deflate compression, 0x1f8b gzip magic number
- if (sz - pos >= 4 && get_unaligned<uint32_t>(img->data() + pos) == 0x00088b1f) {
+ if (sz - pos >= 4 && get_unaligned<uint32_t>(file_content_.data() + pos) == 0x00088b1f) {
// 'pos' is the offset of the start of a gzip chunk.
size_t chunk_offset = pos;
// The remaining data is too small to be a gzip chunk; treat them as a normal chunk.
if (sz - pos < GZIP_HEADER_LEN + GZIP_FOOTER_LEN) {
- chunks->emplace_back(CHUNK_NORMAL, pos, img, sz - pos);
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, sz - pos);
break;
}
// We need three chunks for the deflated image in total, one normal chunk for the header,
// one deflated chunk for the body, and another normal chunk for the footer.
- chunks->emplace_back(CHUNK_NORMAL, pos, img, GZIP_HEADER_LEN);
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, GZIP_HEADER_LEN);
pos += GZIP_HEADER_LEN;
// We must decompress this chunk in order to discover where it ends, and so we can update
@@ -657,7 +1000,7 @@
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.avail_in = sz - pos;
- strm.next_in = img->data() + pos;
+ strm.next_in = file_content_.data() + pos;
// -15 means we are decoding a 'raw' deflate stream; zlib will
// not expect zlib headers.
@@ -700,22 +1043,22 @@
printf("Warning: invalid footer position; treating as a nomal chunk\n");
continue;
}
- size_t footer_size = get_unaligned<uint32_t>(img->data() + footer_index);
+ size_t footer_size = get_unaligned<uint32_t>(file_content_.data() + footer_index);
if (footer_size != uncompressed_len) {
printf("Warning: footer size %zu != decompressed size %zu; treating as a nomal chunk\n",
footer_size, uncompressed_len);
continue;
}
- ImageChunk body(CHUNK_DEFLATE, pos, img, raw_data_len);
+ ImageChunk body(CHUNK_DEFLATE, pos, &file_content_, raw_data_len);
uncompressed_data.resize(uncompressed_len);
body.SetUncompressedData(std::move(uncompressed_data));
- chunks->push_back(body);
+ chunks_.push_back(body);
pos += raw_data_len;
// create a normal chunk for the footer
- chunks->emplace_back(CHUNK_NORMAL, pos, img, GZIP_FOOTER_LEN);
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, GZIP_FOOTER_LEN);
pos += GZIP_FOOTER_LEN;
} else {
@@ -726,12 +1069,12 @@
size_t data_len = 0;
while (data_len + pos < sz) {
if (data_len + pos + 4 <= sz &&
- get_unaligned<uint32_t>(img->data() + pos + data_len) == 0x00088b1f) {
+ get_unaligned<uint32_t>(file_content_.data() + pos + data_len) == 0x00088b1f) {
break;
}
data_len++;
}
- chunks->emplace_back(CHUNK_NORMAL, pos, img, data_len);
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, data_len);
pos += data_len;
}
@@ -740,348 +1083,180 @@
return true;
}
-/*
- * Given source and target chunks, compute a bsdiff patch between them.
- * Store the result in the patch_data.
- * |bsdiff_cache| can be used to cache the suffix array if the same |src| chunk
- * is used repeatedly, pass nullptr if not needed.
- */
-static bool MakePatch(const ImageChunk* src, ImageChunk* tgt, std::vector<uint8_t>* patch_data,
- saidx_t** bsdiff_cache) {
- if (tgt->ChangeChunkToRaw(0)) {
- size_t patch_size = tgt->DataLengthForPatch();
- patch_data->resize(patch_size);
- std::copy(tgt->DataForPatch(), tgt->DataForPatch() + patch_size, patch_data->begin());
- return true;
- }
+// In Image Mode, verify that the source and target images have the same chunk structure (ie, the
+// same sequence of deflate and normal chunks).
+bool ImageModeImage::CheckAndProcessChunks(ImageModeImage* tgt_image, ImageModeImage* src_image) {
+ // In image mode, merge the gzip header and footer in with any adjacent normal chunks.
+ tgt_image->MergeAdjacentNormalChunks();
+ src_image->MergeAdjacentNormalChunks();
-#if defined(__ANDROID__)
- char ptemp[] = "/data/local/tmp/imgdiff-patch-XXXXXX";
-#else
- char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
-#endif
-
- int fd = mkstemp(ptemp);
- if (fd == -1) {
- printf("MakePatch failed to create a temporary file: %s\n", strerror(errno));
+ if (tgt_image->NumOfChunks() != src_image->NumOfChunks()) {
+ printf("source and target don't have same number of chunks!\n");
+ tgt_image->DumpChunks();
+ src_image->DumpChunks();
return false;
}
- close(fd);
-
- int r = bsdiff::bsdiff(src->DataForPatch(), src->DataLengthForPatch(), tgt->DataForPatch(),
- tgt->DataLengthForPatch(), ptemp, bsdiff_cache);
- if (r != 0) {
- printf("bsdiff() failed: %d\n", r);
- return false;
+ for (size_t i = 0; i < tgt_image->NumOfChunks(); ++i) {
+ if (tgt_image->Get(i)->GetType() != src_image->Get(i)->GetType()) {
+ printf("source and target don't have same chunk structure! (chunk %zu)\n", i);
+ tgt_image->DumpChunks();
+ src_image->DumpChunks();
+ return false;
+ }
}
- android::base::unique_fd patch_fd(open(ptemp, O_RDONLY));
- if (patch_fd == -1) {
- printf("failed to open %s: %s\n", ptemp, strerror(errno));
- return false;
- }
- struct stat st;
- if (fstat(patch_fd, &st) != 0) {
- printf("failed to stat patch file %s: %s\n", ptemp, strerror(errno));
- return false;
+ for (size_t i = 0; i < tgt_image->NumOfChunks(); ++i) {
+ auto& tgt_chunk = *tgt_image->Get(i);
+ auto& src_chunk = *src_image->Get(i);
+ if (tgt_chunk.GetType() != CHUNK_DEFLATE) {
+ continue;
+ }
+
+ // Confirm that we can recompress the data and get exactly the same bits as are in the
+ // input target image.
+ if (!tgt_chunk.ReconstructDeflateChunk()) {
+ printf("failed to reconstruct target deflate chunk %zu [%s]; treating as normal\n", i,
+ tgt_chunk.GetEntryName().c_str());
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ src_chunk.ChangeDeflateChunkToNormal();
+ continue;
+ }
+
+ // If two deflate chunks are identical treat them as normal chunks.
+ if (tgt_chunk == src_chunk) {
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ src_chunk.ChangeDeflateChunkToNormal();
+ }
}
- size_t sz = static_cast<size_t>(st.st_size);
- // Change the chunk type to raw if the patch takes less space that way.
- if (tgt->ChangeChunkToRaw(sz)) {
- unlink(ptemp);
- size_t patch_size = tgt->DataLengthForPatch();
- patch_data->resize(patch_size);
- std::copy(tgt->DataForPatch(), tgt->DataForPatch() + patch_size, patch_data->begin());
- return true;
- }
- patch_data->resize(sz);
- if (!android::base::ReadFully(patch_fd, patch_data->data(), sz)) {
- printf("failed to read \"%s\" %s\n", ptemp, strerror(errno));
+ // For images, we need to maintain the parallel structure of the chunk lists, so do the merging
+ // in both the source and target lists.
+ tgt_image->MergeAdjacentNormalChunks();
+ src_image->MergeAdjacentNormalChunks();
+ if (tgt_image->NumOfChunks() != src_image->NumOfChunks()) {
+ // This shouldn't happen.
+ printf("merging normal chunks went awry\n");
return false;
}
- unlink(ptemp);
- tgt->SetSourceInfo(*src);
-
return true;
}
-/*
- * Look for runs of adjacent normal chunks and compress them down into
- * a single chunk. (Such runs can be produced when deflate chunks are
- * changed to normal chunks.)
- */
-static void MergeAdjacentNormalChunks(std::vector<ImageChunk>* chunks) {
- size_t merged_last = 0, cur = 0;
- while (cur < chunks->size()) {
- // Look for normal chunks adjacent to the current one. If such chunk exists, extend the
- // length of the current normal chunk.
- size_t to_check = cur + 1;
- while (to_check < chunks->size() && chunks->at(cur).IsAdjacentNormal(chunks->at(to_check))) {
- chunks->at(cur).MergeAdjacentNormal(chunks->at(to_check));
- to_check++;
+// In image mode, generate patches against the given source chunks and bonus_data; write the
+// result to |patch_name|.
+bool ImageModeImage::GeneratePatches(ImageModeImage* tgt_image, ImageModeImage* src_image,
+ const std::vector<uint8_t>& bonus_data,
+ const std::string& patch_name) {
+ printf("Construct patches for %zu chunks...\n", tgt_image->NumOfChunks());
+ std::vector<std::vector<uint8_t>> patch_data(tgt_image->NumOfChunks());
+
+ for (size_t i = 0; i < tgt_image->NumOfChunks(); i++) {
+ auto& tgt_chunk = *tgt_image->Get(i);
+ auto& src_chunk = *src_image->Get(i);
+
+ if (i == 1 && !bonus_data.empty()) {
+ printf(" using %zu bytes of bonus data for chunk %zu\n", bonus_data.size(), i);
+ src_chunk.SetBonusData(bonus_data);
}
- if (merged_last != cur) {
- chunks->at(merged_last) = std::move(chunks->at(cur));
+ if (!tgt_chunk.MakePatch(src_chunk, &patch_data[i], nullptr)) {
+ printf("Failed to generate patch for target chunk %zu: ", i);
+ return false;
}
- merged_last++;
- cur = to_check;
+ printf("patch %3zu is %zu bytes (of %zu)\n", i, patch_data[i].size(),
+ tgt_chunk.GetRawDataLength());
}
- if (merged_last < chunks->size()) {
- chunks->erase(chunks->begin() + merged_last, chunks->end());
- }
-}
-static ImageChunk* FindChunkByName(const std::string& name, std::vector<ImageChunk>& chunks) {
- for (size_t i = 0; i < chunks.size(); ++i) {
- if (chunks[i].GetType() == CHUNK_DEFLATE && chunks[i].GetEntryName() == name) {
- return &chunks[i];
- }
+ android::base::unique_fd patch_fd(
+ open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
+ if (patch_fd == -1) {
+ printf("failed to open \"%s\": %s\n", patch_name.c_str(), strerror(errno));
+ return false;
}
- return nullptr;
-}
-static void DumpChunks(const std::vector<ImageChunk>& chunks) {
- for (size_t i = 0; i < chunks.size(); ++i) {
- printf("chunk %zu: ", i);
- chunks[i].Dump();
- }
+ return tgt_image->WritePatchDataToFd(patch_data, patch_fd);
}
int imgdiff(int argc, const char** argv) {
bool zip_mode = false;
-
- if (argc >= 2 && strcmp(argv[1], "-z") == 0) {
- zip_mode = true;
- --argc;
- ++argv;
- }
-
std::vector<uint8_t> bonus_data;
- if (argc >= 3 && strcmp(argv[1], "-b") == 0) {
- android::base::unique_fd fd(open(argv[2], O_RDONLY));
- if (fd == -1) {
- printf("failed to open bonus file %s: %s\n", argv[2], strerror(errno));
- return 1;
- }
- struct stat st;
- if (fstat(fd, &st) != 0) {
- printf("failed to stat bonus file %s: %s\n", argv[2], strerror(errno));
- return 1;
- }
- size_t bonus_size = st.st_size;
- bonus_data.resize(bonus_size);
- if (!android::base::ReadFully(fd, bonus_data.data(), bonus_size)) {
- printf("failed to read bonus file %s: %s\n", argv[2], strerror(errno));
- return 1;
- }
+ int opt;
+ optind = 1; // Reset the getopt state so that we can call it multiple times in tests.
- argc -= 2;
- argv += 2;
+ while ((opt = getopt(argc, const_cast<char**>(argv), "zb:")) != -1) {
+ switch (opt) {
+ case 'z':
+ zip_mode = true;
+ break;
+ case 'b': {
+ android::base::unique_fd fd(open(optarg, O_RDONLY));
+ if (fd == -1) {
+ printf("failed to open bonus file %s: %s\n", optarg, strerror(errno));
+ return 1;
+ }
+ struct stat st;
+ if (fstat(fd, &st) != 0) {
+ printf("failed to stat bonus file %s: %s\n", optarg, strerror(errno));
+ return 1;
+ }
+
+ size_t bonus_size = st.st_size;
+ bonus_data.resize(bonus_size);
+ if (!android::base::ReadFully(fd, bonus_data.data(), bonus_size)) {
+ printf("failed to read bonus file %s: %s\n", optarg, strerror(errno));
+ return 1;
+ }
+ break;
+ }
+ default:
+ printf("unexpected opt: %s\n", optarg);
+ return 2;
+ }
}
- if (argc != 4) {
- printf("usage: %s [-z] [-b <bonus-file>] <src-img> <tgt-img> <patch-file>\n",
- argv[0]);
+ if (argc - optind != 3) {
+ printf("usage: %s [-z] [-b <bonus-file>] <src-img> <tgt-img> <patch-file>\n", argv[0]);
return 2;
}
- std::vector<ImageChunk> src_chunks;
- std::vector<ImageChunk> tgt_chunks;
- std::vector<uint8_t> src_file;
- std::vector<uint8_t> tgt_file;
-
if (zip_mode) {
- if (!ReadZip(argv[1], &src_chunks, &src_file, true)) {
- printf("failed to break apart source zip file\n");
+ ZipModeImage src_image(true);
+ ZipModeImage tgt_image(false);
+
+ if (!src_image.Initialize(argv[optind])) {
return 1;
}
- if (!ReadZip(argv[2], &tgt_chunks, &tgt_file, false)) {
- printf("failed to break apart target zip file\n");
+ if (!tgt_image.Initialize(argv[optind + 1])) {
+ return 1;
+ }
+
+ if (!ZipModeImage::CheckAndProcessChunks(&tgt_image, &src_image)) {
+ return 1;
+ }
+ // Compute bsdiff patches for each chunk's data (the uncompressed data, in the case of
+ // deflate chunks).
+ if (!ZipModeImage::GeneratePatches(&tgt_image, &src_image, argv[optind + 2])) {
return 1;
}
} else {
- if (!ReadImage(argv[1], &src_chunks, &src_file)) {
- printf("failed to break apart source image\n");
+ ImageModeImage src_image(true);
+ ImageModeImage tgt_image(false);
+
+ if (!src_image.Initialize(argv[optind])) {
return 1;
}
- if (!ReadImage(argv[2], &tgt_chunks, &tgt_file)) {
- printf("failed to break apart target image\n");
+ if (!tgt_image.Initialize(argv[optind + 1])) {
return 1;
}
- // Verify that the source and target images have the same chunk
- // structure (ie, the same sequence of deflate and normal chunks).
-
- // Merge the gzip header and footer in with any adjacent normal chunks.
- MergeAdjacentNormalChunks(&tgt_chunks);
- MergeAdjacentNormalChunks(&src_chunks);
-
- if (src_chunks.size() != tgt_chunks.size()) {
- printf("source and target don't have same number of chunks!\n");
- printf("source chunks:\n");
- DumpChunks(src_chunks);
- printf("target chunks:\n");
- DumpChunks(tgt_chunks);
+ if (!ImageModeImage::CheckAndProcessChunks(&tgt_image, &src_image)) {
return 1;
}
- for (size_t i = 0; i < src_chunks.size(); ++i) {
- if (src_chunks[i].GetType() != tgt_chunks[i].GetType()) {
- printf("source and target don't have same chunk structure! (chunk %zu)\n", i);
- printf("source chunks:\n");
- DumpChunks(src_chunks);
- printf("target chunks:\n");
- DumpChunks(tgt_chunks);
- return 1;
- }
- }
- }
-
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- if (tgt_chunks[i].GetType() == CHUNK_DEFLATE) {
- // Confirm that given the uncompressed chunk data in the target, we
- // can recompress it and get exactly the same bits as are in the
- // input target image. If this fails, treat the chunk as a normal
- // non-deflated chunk.
- if (!tgt_chunks[i].ReconstructDeflateChunk()) {
- printf("failed to reconstruct target deflate chunk %zu [%s]; treating as normal\n", i,
- tgt_chunks[i].GetEntryName().c_str());
- tgt_chunks[i].ChangeDeflateChunkToNormal();
- if (zip_mode) {
- ImageChunk* src = FindChunkByName(tgt_chunks[i].GetEntryName(), src_chunks);
- if (src != nullptr) {
- src->ChangeDeflateChunkToNormal();
- }
- } else {
- src_chunks[i].ChangeDeflateChunkToNormal();
- }
- continue;
- }
-
- // If two deflate chunks are identical (eg, the kernel has not
- // changed between two builds), treat them as normal chunks.
- // This makes applypatch much faster -- it can apply a trivial
- // patch to the compressed data, rather than uncompressing and
- // recompressing to apply the trivial patch to the uncompressed
- // data.
- ImageChunk* src;
- if (zip_mode) {
- src = FindChunkByName(tgt_chunks[i].GetEntryName(), src_chunks);
- } else {
- src = &src_chunks[i];
- }
-
- if (src == nullptr) {
- tgt_chunks[i].ChangeDeflateChunkToNormal();
- } else if (tgt_chunks[i] == *src) {
- tgt_chunks[i].ChangeDeflateChunkToNormal();
- src->ChangeDeflateChunkToNormal();
- }
- }
- }
-
- // Merging neighboring normal chunks.
- if (zip_mode) {
- // For zips, we only need to do this to the target: deflated
- // chunks are matched via filename, and normal chunks are patched
- // using the entire source file as the source.
- MergeAdjacentNormalChunks(&tgt_chunks);
-
- } else {
- // For images, we need to maintain the parallel structure of the
- // chunk lists, so do the merging in both the source and target
- // lists.
- MergeAdjacentNormalChunks(&tgt_chunks);
- MergeAdjacentNormalChunks(&src_chunks);
- if (src_chunks.size() != tgt_chunks.size()) {
- // This shouldn't happen.
- printf("merging normal chunks went awry\n");
+ if (!ImageModeImage::GeneratePatches(&tgt_image, &src_image, bonus_data, argv[optind + 2])) {
return 1;
}
}
- // Compute bsdiff patches for each chunk's data (the uncompressed
- // data, in the case of deflate chunks).
-
- DumpChunks(src_chunks);
-
- printf("Construct patches for %zu chunks...\n", tgt_chunks.size());
- std::vector<std::vector<uint8_t>> patch_data(tgt_chunks.size());
- saidx_t* bsdiff_cache = nullptr;
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- if (zip_mode) {
- ImageChunk* src;
- if (tgt_chunks[i].GetType() == CHUNK_DEFLATE &&
- (src = FindChunkByName(tgt_chunks[i].GetEntryName(), src_chunks))) {
- if (!MakePatch(src, &tgt_chunks[i], &patch_data[i], nullptr)) {
- printf("Failed to generate patch for target chunk %zu: ", i);
- return 1;
- }
- } else {
- if (!MakePatch(&src_chunks[0], &tgt_chunks[i], &patch_data[i], &bsdiff_cache)) {
- printf("Failed to generate patch for target chunk %zu: ", i);
- return 1;
- }
- }
- } else {
- if (i == 1 && !bonus_data.empty()) {
- printf(" using %zu bytes of bonus data for chunk %zu\n", bonus_data.size(), i);
- src_chunks[i].SetBonusData(bonus_data);
- }
-
- if (!MakePatch(&src_chunks[i], &tgt_chunks[i], &patch_data[i], nullptr)) {
- printf("Failed to generate patch for target chunk %zu: ", i);
- return 1;
- }
- }
- printf("patch %3zu is %zu bytes (of %zu)\n", i, patch_data[i].size(),
- src_chunks[i].GetRawDataLength());
- }
-
- if (bsdiff_cache != nullptr) {
- free(bsdiff_cache);
- }
-
- // Figure out how big the imgdiff file header is going to be, so
- // that we can correctly compute the offset of each bsdiff patch
- // within the file.
-
- size_t total_header_size = 12;
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- total_header_size += tgt_chunks[i].GetHeaderSize(patch_data[i].size());
- }
-
- size_t offset = total_header_size;
-
- android::base::unique_fd patch_fd(open(argv[3], O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
- if (patch_fd == -1) {
- printf("failed to open \"%s\": %s\n", argv[3], strerror(errno));
- return 1;
- }
-
- // Write out the headers.
- if (!android::base::WriteStringToFd("IMGDIFF2", patch_fd)) {
- printf("failed to write \"IMGDIFF2\" to \"%s\": %s\n", argv[3], strerror(errno));
- return 1;
- }
- Write4(patch_fd, static_cast<int32_t>(tgt_chunks.size()));
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- printf("chunk %zu: ", i);
- offset = tgt_chunks[i].WriteHeaderToFd(patch_fd, patch_data[i], offset);
- }
-
- // Append each chunk's bsdiff patch, in order.
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- if (tgt_chunks[i].GetType() != CHUNK_RAW) {
- if (!android::base::WriteFully(patch_fd, patch_data[i].data(), patch_data[i].size())) {
- CHECK(false) << "failed to write " << patch_data[i].size() << " bytes patch for chunk "
- << i;
- }
- }
- }
-
return 0;
}
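
Note: the getopt-based parsing above keeps the existing invocation shown in the usage string; for example (file names are illustrative):

    imgdiff -z old-package.zip new-package.zip package.p        # zip mode
    imgdiff -b bonus.bin old-boot.img new-boot.img boot.img.p   # image mode with bonus data
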
diff --git a/edify/Android.mk b/edify/Android.mk
index d8058c1..ffd54c2 100644
--- a/edify/Android.mk
+++ b/edify/Android.mk
@@ -34,7 +34,6 @@
LOCAL_YACCFLAGS := -v
LOCAL_CPPFLAGS += -Wno-unused-parameter
LOCAL_CPPFLAGS += -Wno-deprecated-register
-LOCAL_CLANG := true
LOCAL_C_INCLUDES += $(LOCAL_PATH)/..
LOCAL_STATIC_LIBRARIES += libbase
@@ -51,7 +50,6 @@
LOCAL_CPPFLAGS := -Wno-unused-parameter
LOCAL_CPPFLAGS += -Wno-deprecated-register
LOCAL_MODULE := libedify
-LOCAL_CLANG := true
LOCAL_C_INCLUDES += $(LOCAL_PATH)/..
LOCAL_STATIC_LIBRARIES += libbase
diff --git a/error_code.h b/error_code.h
index 9fe047c..4cbad4c 100644
--- a/error_code.h
+++ b/error_code.h
@@ -68,6 +68,8 @@
kUncryptFileCloseError,
kUncryptFileRenameError,
kUncryptPackageMissingError,
+ kUncryptRealpathFindError,
+ kUncryptBlockDeviceFindError,
};
#endif // _ERROR_CODE_H_
diff --git a/minadbd/Android.mk b/minadbd/Android.mk
index de0b0c8..8d86fd6 100644
--- a/minadbd/Android.mk
+++ b/minadbd/Android.mk
@@ -15,7 +15,6 @@
minadbd.cpp \
minadbd_services.cpp \
-LOCAL_CLANG := true
LOCAL_MODULE := libminadbd
LOCAL_CFLAGS := $(minadbd_cflags)
LOCAL_CONLY_FLAGS := -Wimplicit-function-declaration
@@ -27,7 +26,6 @@
include $(CLEAR_VARS)
-LOCAL_CLANG := true
LOCAL_MODULE := minadbd_test
LOCAL_COMPATIBILITY_SUITE := device-tests
LOCAL_SRC_FILES := fuse_adb_provider_test.cpp
diff --git a/otafault/Android.mk b/otafault/Android.mk
index ec4cdb3..7b5aab0 100644
--- a/otafault/Android.mk
+++ b/otafault/Android.mk
@@ -32,7 +32,6 @@
LOCAL_SRC_FILES := config.cpp ota_io.cpp
LOCAL_MODULE_TAGS := eng
LOCAL_MODULE := libotafault
-LOCAL_CLANG := true
LOCAL_C_INCLUDES := bootable/recovery
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
LOCAL_WHOLE_STATIC_LIBRARIES := $(otafault_static_libs)
diff --git a/otautil/DirUtil.cpp b/otautil/DirUtil.cpp
index e08e360..fffc822 100644
--- a/otautil/DirUtil.cpp
+++ b/otautil/DirUtil.cpp
@@ -16,203 +16,101 @@
#include "DirUtil.h"
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <errno.h>
#include <dirent.h>
-#include <limits.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
#include <string>
#include <selinux/label.h>
#include <selinux/selinux.h>
-typedef enum { DMISSING, DDIR, DILLEGAL } DirStatus;
+enum class DirStatus { DMISSING, DDIR, DILLEGAL };
-static DirStatus
-getPathDirStatus(const char *path)
-{
- struct stat st;
- int err;
-
- err = stat(path, &st);
- if (err == 0) {
- /* Something's there; make sure it's a directory.
- */
- if (S_ISDIR(st.st_mode)) {
- return DDIR;
- }
- errno = ENOTDIR;
- return DILLEGAL;
- } else if (errno != ENOENT) {
- /* Something went wrong, or something in the path
- * is bad. Can't do anything in this situation.
- */
- return DILLEGAL;
+static DirStatus dir_status(const std::string& path) {
+ struct stat sb;
+ if (stat(path.c_str(), &sb) == 0) {
+ // Something's there; make sure it's a directory.
+ if (S_ISDIR(sb.st_mode)) {
+ return DirStatus::DDIR;
}
- return DMISSING;
+ errno = ENOTDIR;
+ return DirStatus::DILLEGAL;
+ } else if (errno != ENOENT) {
+ // Something went wrong, or something in the path is bad. Can't do anything in this situation.
+ return DirStatus::DILLEGAL;
+ }
+ return DirStatus::DMISSING;
}
-int
-dirCreateHierarchy(const char *path, int mode,
- const struct utimbuf *timestamp, bool stripFileName,
- struct selabel_handle *sehnd)
-{
- DirStatus ds;
+int mkdir_recursively(const std::string& input_path, mode_t mode, bool strip_filename,
+ const selabel_handle* sehnd) {
+ // Check for an empty string before we bother making any syscalls.
+ if (input_path.empty()) {
+ errno = ENOENT;
+ return -1;
+ }
- /* Check for an empty string before we bother
- * making any syscalls.
- */
- if (path[0] == '\0') {
- errno = ENOENT;
- return -1;
+ // Allocate a path that we can modify; stick a slash on the end to make things easier.
+ std::string path = input_path;
+ if (strip_filename) {
+ // Strip everything after the last slash.
+ size_t pos = path.rfind('/');
+ if (pos == std::string::npos) {
+ errno = ENOENT;
+ return -1;
}
- // Allocate a path that we can modify; stick a slash on
- // the end to make things easier.
- std::string cpath = path;
- if (stripFileName) {
- // Strip everything after the last slash.
- size_t pos = cpath.rfind('/');
- if (pos == std::string::npos) {
- errno = ENOENT;
- return -1;
- }
- cpath.resize(pos + 1);
- } else {
- // Make sure that the path ends in a slash.
- cpath.push_back('/');
- }
+ path.resize(pos + 1);
+ } else {
+ // Make sure that the path ends in a slash.
+ path.push_back('/');
+ }
- /* See if it already exists.
- */
- ds = getPathDirStatus(cpath.c_str());
- if (ds == DDIR) {
- return 0;
- } else if (ds == DILLEGAL) {
- return -1;
- }
-
- /* Walk up the path from the root and make each level.
- * If a directory already exists, no big deal.
- */
- const char *path_start = &cpath[0];
- char *p = &cpath[0];
- while (*p != '\0') {
- /* Skip any slashes, watching out for the end of the string.
- */
- while (*p != '\0' && *p == '/') {
- p++;
- }
- if (*p == '\0') {
- break;
- }
-
- /* Find the end of the next path component.
- * We know that we'll see a slash before the NUL,
- * because we added it, above.
- */
- while (*p != '/') {
- p++;
- }
- *p = '\0';
-
- /* Check this part of the path and make a new directory
- * if necessary.
- */
- ds = getPathDirStatus(path_start);
- if (ds == DILLEGAL) {
- /* Could happen if some other process/thread is
- * messing with the filesystem.
- */
- return -1;
- } else if (ds == DMISSING) {
- int err;
-
- char *secontext = NULL;
-
- if (sehnd) {
- selabel_lookup(sehnd, &secontext, path_start, mode);
- setfscreatecon(secontext);
- }
-
- err = mkdir(path_start, mode);
-
- if (secontext) {
- freecon(secontext);
- setfscreatecon(NULL);
- }
-
- if (err != 0) {
- return -1;
- }
- if (timestamp != NULL && utime(path_start, timestamp)) {
- return -1;
- }
- }
- // else, this directory already exists.
-
- // Repair the path and continue.
- *p = '/';
- }
+ // See if it already exists.
+ DirStatus ds = dir_status(path);
+ if (ds == DirStatus::DDIR) {
return 0;
-}
+ } else if (ds == DirStatus::DILLEGAL) {
+ return -1;
+ }
-int
-dirUnlinkHierarchy(const char *path)
-{
- struct stat st;
- DIR *dir;
- struct dirent *de;
- int fail = 0;
-
- /* is it a file or directory? */
- if (lstat(path, &st) < 0) {
+ // Walk up the path from the root and make each level.
+ size_t prev_end = 0;
+ while (prev_end < path.size()) {
+ size_t next_end = path.find('/', prev_end + 1);
+ if (next_end == std::string::npos) {
+ break;
+ }
+ std::string dir_path = path.substr(0, next_end);
+ // Check this part of the path and make a new directory if necessary.
+ switch (dir_status(dir_path)) {
+ case DirStatus::DILLEGAL:
+ // Could happen if some other process/thread is messing with the filesystem.
return -1;
- }
-
- /* a file, so unlink it */
- if (!S_ISDIR(st.st_mode)) {
- return unlink(path);
- }
-
- /* a directory, so open handle */
- dir = opendir(path);
- if (dir == NULL) {
- return -1;
- }
-
- /* recurse over components */
- errno = 0;
- while ((de = readdir(dir)) != NULL) {
- //TODO: don't blow the stack
- char dn[PATH_MAX];
- if (!strcmp(de->d_name, "..") || !strcmp(de->d_name, ".")) {
- continue;
+ case DirStatus::DMISSING: {
+ char* secontext = nullptr;
+ if (sehnd) {
+ selabel_lookup(const_cast<selabel_handle*>(sehnd), &secontext, dir_path.c_str(), mode);
+ setfscreatecon(secontext);
}
- snprintf(dn, sizeof(dn), "%s/%s", path, de->d_name);
- if (dirUnlinkHierarchy(dn) < 0) {
- fail = 1;
- break;
+ int err = mkdir(dir_path.c_str(), mode);
+ if (secontext) {
+ freecon(secontext);
+ setfscreatecon(nullptr);
}
- errno = 0;
+ if (err != 0) {
+ return -1;
+ }
+ break;
+ }
+ default:
+ // Already exists.
+ break;
}
- /* in case readdir or unlink_recursive failed */
- if (fail || errno < 0) {
- int save = errno;
- closedir(dir);
- errno = save;
- return -1;
- }
-
- /* close directory handle */
- if (closedir(dir) < 0) {
- return -1;
- }
-
- /* delete target directory */
- return rmdir(path);
+ prev_end = next_end;
+ }
+ return 0;
}
diff --git a/otautil/DirUtil.h b/otautil/DirUtil.h
index 85b83c3..85d6c16 100644
--- a/otautil/DirUtil.h
+++ b/otautil/DirUtil.h
@@ -14,41 +14,26 @@
* limitations under the License.
*/
-#ifndef MINZIP_DIRUTIL_H_
-#define MINZIP_DIRUTIL_H_
+#ifndef OTAUTIL_DIRUTIL_H_
+#define OTAUTIL_DIRUTIL_H_
-#include <stdbool.h>
-#include <utime.h>
+#include <sys/stat.h> // mode_t
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include <string>
struct selabel_handle;
-/* Like "mkdir -p", try to guarantee that all directories
- * specified in path are present, creating as many directories
- * as necessary. The specified mode is passed to all mkdir
- * calls; no modifications are made to umask.
- *
- * If stripFileName is set, everything after the final '/'
- * is stripped before creating the directory hierarchy.
- *
- * If timestamp is non-NULL, new directories will be timestamped accordingly.
- *
- * Returns 0 on success; returns -1 (and sets errno) on failure
- * (usually if some element of path is not a directory).
- */
-int dirCreateHierarchy(const char *path, int mode,
- const struct utimbuf *timestamp, bool stripFileName,
- struct selabel_handle* sehnd);
+// Like "mkdir -p", try to guarantee that all directories specified in path are present, creating as
+// many directories as necessary. The specified mode is passed to all mkdir calls; no modifications
+// are made to umask.
+//
+// If strip_filename is set, everything after the final '/' is stripped before creating the
+// directory hierarchy.
+//
+// Returns 0 on success; returns -1 (and sets errno) on failure (usually if some element of path is
+// not a directory).
+int mkdir_recursively(const std::string& path, mode_t mode, bool strip_filename,
+ const struct selabel_handle* sehnd);
-/* rm -rf <path>
- */
-int dirUnlinkHierarchy(const char *path);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // MINZIP_DIRUTIL_H_
+#endif // OTAUTIL_DIRUTIL_H_
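
Note: callers migrate from dirCreateHierarchy() by dropping the timestamp argument, as the recovery.cpp hunks below show. A minimal usage sketch (illustrative; the include path and the chosen path are assumptions):

    #include <string>

    #include "otautil/DirUtil.h"  // include path assumed

    // Ensure the parent directories of a log file exist before writing it.
    // strip_filename = true drops the trailing "last_log" component; passing a
    // null selabel handle simply skips SELinux labeling.
    static bool PrepareLogDir() {
      std::string log_path = "/cache/recovery/last_log";  // illustrative path
      return mkdir_recursively(log_path, 0755, true /* strip_filename */,
                               nullptr /* sehnd */) == 0;
    }
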
diff --git a/recovery.cpp b/recovery.cpp
index 8f08c53..9abb934 100644
--- a/recovery.cpp
+++ b/recovery.cpp
@@ -179,19 +179,19 @@
* 7b. the user reboots (pulling the battery, etc) into the main system
*/
-// open a given path, mounting partitions as necessary
-FILE* fopen_path(const char *path, const char *mode) {
- if (ensure_path_mounted(path) != 0) {
- LOG(ERROR) << "Can't mount " << path;
- return NULL;
- }
+// Open a given path, mounting partitions as necessary.
+FILE* fopen_path(const char* path, const char* mode) {
+ if (ensure_path_mounted(path) != 0) {
+ LOG(ERROR) << "Can't mount " << path;
+ return nullptr;
+ }
- // When writing, try to create the containing directory, if necessary.
- // Use generous permissions, the system (init.rc) will reset them.
- if (strchr("wa", mode[0])) dirCreateHierarchy(path, 0777, NULL, 1, sehandle);
-
- FILE *fp = fopen(path, mode);
- return fp;
+ // When writing, try to create the containing directory, if necessary. Use generous permissions;
+ // the system (init.rc) will reset them.
+ if (strchr("wa", mode[0])) {
+ mkdir_recursively(path, 0777, true, sehandle);
+ }
+ return fopen(path, mode);
}
// close a file, log an error if the error indicator is set
@@ -594,7 +594,7 @@
if (is_cache) {
// Re-create the log dir and write back the log entries.
if (ensure_path_mounted(CACHE_LOG_DIR) == 0 &&
- dirCreateHierarchy(CACHE_LOG_DIR, 0777, nullptr, false, sehandle) == 0) {
+ mkdir_recursively(CACHE_LOG_DIR, 0777, false, sehandle) == 0) {
for (const auto& log : log_files) {
if (!android::base::WriteStringToFile(log.data, log.name, log.sb.st_mode, log.sb.st_uid,
log.sb.st_gid)) {
diff --git a/roots.cpp b/roots.cpp
index 29f55b9..fdcbfe8 100644
--- a/roots.cpp
+++ b/roots.cpp
@@ -16,162 +16,166 @@
#include "roots.h"
-#include <errno.h>
+#include <ctype.h>
+#include <fcntl.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
-#include <ctype.h>
-#include <fcntl.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <android-base/unique_fd.h>
+#include <cryptfs.h>
#include <ext4_utils/wipe.h>
#include <fs_mgr.h>
#include "common.h"
#include "mounts.h"
-#include "cryptfs.h"
-static struct fstab *fstab = NULL;
+static struct fstab* fstab = nullptr;
-extern struct selabel_handle *sehandle;
+extern struct selabel_handle* sehandle;
-void load_volume_table()
-{
- int i;
- int ret;
+void load_volume_table() {
+ fstab = fs_mgr_read_fstab_default();
+ if (!fstab) {
+ LOG(ERROR) << "Failed to read default fstab";
+ return;
+ }
- fstab = fs_mgr_read_fstab_default();
- if (!fstab) {
- LOG(ERROR) << "failed to read default fstab";
- return;
- }
+ int ret = fs_mgr_add_entry(fstab, "/tmp", "ramdisk", "ramdisk");
+ if (ret == -1) {
+ LOG(ERROR) << "Failed to add /tmp entry to fstab";
+ fs_mgr_free_fstab(fstab);
+ fstab = nullptr;
+ return;
+ }
- ret = fs_mgr_add_entry(fstab, "/tmp", "ramdisk", "ramdisk");
- if (ret < 0 ) {
- LOG(ERROR) << "failed to add /tmp entry to fstab";
- fs_mgr_free_fstab(fstab);
- fstab = NULL;
- return;
- }
-
- printf("recovery filesystem table\n");
- printf("=========================\n");
- for (i = 0; i < fstab->num_entries; ++i) {
- Volume* v = &fstab->recs[i];
- printf(" %d %s %s %s %lld\n", i, v->mount_point, v->fs_type,
- v->blk_device, v->length);
- }
- printf("\n");
+ printf("recovery filesystem table\n");
+ printf("=========================\n");
+ for (int i = 0; i < fstab->num_entries; ++i) {
+ const Volume* v = &fstab->recs[i];
+ printf(" %d %s %s %s %lld\n", i, v->mount_point, v->fs_type, v->blk_device, v->length);
+ }
+ printf("\n");
}
Volume* volume_for_path(const char* path) {
- return fs_mgr_get_entry_for_mount_point(fstab, path);
+ return fs_mgr_get_entry_for_mount_point(fstab, path);
}
// Mount the volume specified by path at the given mount_point.
int ensure_path_mounted_at(const char* path, const char* mount_point) {
- Volume* v = volume_for_path(path);
- if (v == NULL) {
- LOG(ERROR) << "unknown volume for path [" << path << "]";
- return -1;
- }
- if (strcmp(v->fs_type, "ramdisk") == 0) {
- // the ramdisk is always mounted.
- return 0;
- }
-
- if (!scan_mounted_volumes()) {
- LOG(ERROR) << "failed to scan mounted volumes";
- return -1;
- }
-
- if (!mount_point) {
- mount_point = v->mount_point;
- }
-
- MountedVolume* mv = find_mounted_volume_by_mount_point(mount_point);
- if (mv) {
- // volume is already mounted
- return 0;
- }
-
- mkdir(mount_point, 0755); // in case it doesn't already exist
-
- if (strcmp(v->fs_type, "ext4") == 0 ||
- strcmp(v->fs_type, "squashfs") == 0 ||
- strcmp(v->fs_type, "vfat") == 0) {
- int result = mount(v->blk_device, mount_point, v->fs_type, v->flags, v->fs_options);
- if (result == -1 && fs_mgr_is_formattable(v)) {
- LOG(ERROR) << "failed to mount " << mount_point << " (" << strerror(errno)
- << ") , formatting.....";
- bool crypt_footer = fs_mgr_is_encryptable(v) && !strcmp(v->key_loc, "footer");
- if (fs_mgr_do_format(v, crypt_footer) == 0) {
- result = mount(v->blk_device, mount_point, v->fs_type, v->flags, v->fs_options);
- } else {
- PLOG(ERROR) << "failed to format " << mount_point;
- return -1;
- }
- }
-
- if (result == -1) {
- PLOG(ERROR) << "failed to mount " << mount_point;
- return -1;
- }
- return 0;
- }
-
- LOG(ERROR) << "unknown fs_type \"" << v->fs_type << "\" for " << mount_point;
+ Volume* v = volume_for_path(path);
+ if (v == nullptr) {
+ LOG(ERROR) << "unknown volume for path [" << path << "]";
return -1;
+ }
+ if (strcmp(v->fs_type, "ramdisk") == 0) {
+ // The ramdisk is always mounted.
+ return 0;
+ }
+
+ if (!scan_mounted_volumes()) {
+ LOG(ERROR) << "Failed to scan mounted volumes";
+ return -1;
+ }
+
+ if (!mount_point) {
+ mount_point = v->mount_point;
+ }
+
+ const MountedVolume* mv = find_mounted_volume_by_mount_point(mount_point);
+ if (mv != nullptr) {
+ // Volume is already mounted.
+ return 0;
+ }
+
+ mkdir(mount_point, 0755); // in case it doesn't already exist
+
+ if (strcmp(v->fs_type, "ext4") == 0 || strcmp(v->fs_type, "squashfs") == 0 ||
+ strcmp(v->fs_type, "vfat") == 0) {
+ int result = mount(v->blk_device, mount_point, v->fs_type, v->flags, v->fs_options);
+ if (result == -1 && fs_mgr_is_formattable(v)) {
+ PLOG(ERROR) << "Failed to mount " << mount_point << "; formatting";
+ bool crypt_footer = fs_mgr_is_encryptable(v) && !strcmp(v->key_loc, "footer");
+ if (fs_mgr_do_format(v, crypt_footer) == 0) {
+ result = mount(v->blk_device, mount_point, v->fs_type, v->flags, v->fs_options);
+ } else {
+ PLOG(ERROR) << "Failed to format " << mount_point;
+ return -1;
+ }
+ }
+
+ if (result == -1) {
+ PLOG(ERROR) << "Failed to mount " << mount_point;
+ return -1;
+ }
+ return 0;
+ }
+
+ LOG(ERROR) << "unknown fs_type \"" << v->fs_type << "\" for " << mount_point;
+ return -1;
}
int ensure_path_mounted(const char* path) {
- // Mount at the default mount point.
- return ensure_path_mounted_at(path, nullptr);
+ // Mount at the default mount point.
+ return ensure_path_mounted_at(path, nullptr);
}
int ensure_path_unmounted(const char* path) {
- Volume* v = volume_for_path(path);
- if (v == NULL) {
- LOG(ERROR) << "unknown volume for path [" << path << "]";
- return -1;
- }
- if (strcmp(v->fs_type, "ramdisk") == 0) {
- // the ramdisk is always mounted; you can't unmount it.
- return -1;
- }
+ const Volume* v = volume_for_path(path);
+ if (v == nullptr) {
+ LOG(ERROR) << "unknown volume for path [" << path << "]";
+ return -1;
+ }
+ if (strcmp(v->fs_type, "ramdisk") == 0) {
+ // The ramdisk is always mounted; you can't unmount it.
+ return -1;
+ }
- if (!scan_mounted_volumes()) {
- LOG(ERROR) << "failed to scan mounted volumes";
- return -1;
- }
+ if (!scan_mounted_volumes()) {
+ LOG(ERROR) << "Failed to scan mounted volumes";
+ return -1;
+ }
- MountedVolume* mv = find_mounted_volume_by_mount_point(v->mount_point);
- if (mv == NULL) {
- // volume is already unmounted
- return 0;
- }
+ MountedVolume* mv = find_mounted_volume_by_mount_point(v->mount_point);
+ if (mv == nullptr) {
+ // Volume is already unmounted.
+ return 0;
+ }
- return unmount_mounted_volume(mv);
+ return unmount_mounted_volume(mv);
}
-static int exec_cmd(const char* path, char* const argv[]) {
- int status;
- pid_t child;
- if ((child = vfork()) == 0) {
- execv(path, argv);
- _exit(EXIT_FAILURE);
- }
- waitpid(child, &status, 0);
- if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- LOG(ERROR) << path << " failed with status " << WEXITSTATUS(status);
- }
- return WEXITSTATUS(status);
+static int exec_cmd(const std::vector<std::string>& args) {
+ CHECK_NE(static_cast<size_t>(0), args.size());
+
+ std::vector<char*> argv(args.size());
+ std::transform(args.cbegin(), args.cend(), argv.begin(),
+ [](const std::string& arg) { return const_cast<char*>(arg.c_str()); });
+ argv.push_back(nullptr);
+
+ pid_t child;
+ if ((child = vfork()) == 0) {
+ execv(argv[0], argv.data());
+ _exit(EXIT_FAILURE);
+ }
+
+ int status;
+ waitpid(child, &status, 0);
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ LOG(ERROR) << args[0] << " failed with status " << WEXITSTATUS(status);
+ }
+ return WEXITSTATUS(status);
}
static ssize_t get_file_size(int fd, uint64_t reserve_len) {
@@ -192,136 +196,116 @@
}
int format_volume(const char* volume, const char* directory) {
- Volume* v = volume_for_path(volume);
- if (v == NULL) {
- LOG(ERROR) << "unknown volume \"" << volume << "\"";
- return -1;
- }
- if (strcmp(v->fs_type, "ramdisk") == 0) {
- // you can't format the ramdisk.
- LOG(ERROR) << "can't format_volume \"" << volume << "\"";
- return -1;
- }
- if (strcmp(v->mount_point, volume) != 0) {
- LOG(ERROR) << "can't give path \"" << volume << "\" to format_volume";
- return -1;
- }
-
- if (ensure_path_unmounted(volume) != 0) {
- LOG(ERROR) << "format_volume failed to unmount \"" << v->mount_point << "\"";
- return -1;
- }
-
- if (strcmp(v->fs_type, "ext4") == 0 || strcmp(v->fs_type, "f2fs") == 0) {
- // if there's a key_loc that looks like a path, it should be a
- // block device for storing encryption metadata. wipe it too.
- if (v->key_loc != NULL && v->key_loc[0] == '/') {
- LOG(INFO) << "wiping " << v->key_loc;
- int fd = open(v->key_loc, O_WRONLY | O_CREAT, 0644);
- if (fd < 0) {
- LOG(ERROR) << "format_volume: failed to open " << v->key_loc;
- return -1;
- }
- wipe_block_device(fd, get_file_size(fd));
- close(fd);
- }
-
- ssize_t length = 0;
- if (v->length != 0) {
- length = v->length;
- } else if (v->key_loc != NULL && strcmp(v->key_loc, "footer") == 0) {
- android::base::unique_fd fd(open(v->blk_device, O_RDONLY));
- if (fd < 0) {
- PLOG(ERROR) << "get_file_size: failed to open " << v->blk_device;
- return -1;
- }
- length = get_file_size(fd.get(), CRYPT_FOOTER_OFFSET);
- if (length <= 0) {
- LOG(ERROR) << "get_file_size: invalid size " << length << " for " << v->blk_device;
- return -1;
- }
- }
- int result;
- if (strcmp(v->fs_type, "ext4") == 0) {
- static constexpr int block_size = 4096;
- int raid_stride = v->logical_blk_size / block_size;
- int raid_stripe_width = v->erase_blk_size / block_size;
-
- // stride should be the max of 8kb and logical block size
- if (v->logical_blk_size != 0 && v->logical_blk_size < 8192) {
- raid_stride = 8192 / block_size;
- }
-
- const char* mke2fs_argv[] = { "/sbin/mke2fs_static",
- "-F",
- "-t",
- "ext4",
- "-b",
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr };
-
- int i = 5;
- std::string block_size_str = std::to_string(block_size);
- mke2fs_argv[i++] = block_size_str.c_str();
-
- std::string ext_args;
- if (v->erase_blk_size != 0 && v->logical_blk_size != 0) {
- ext_args = android::base::StringPrintf("stride=%d,stripe-width=%d", raid_stride,
- raid_stripe_width);
- mke2fs_argv[i++] = "-E";
- mke2fs_argv[i++] = ext_args.c_str();
- }
-
- mke2fs_argv[i++] = v->blk_device;
-
- std::string size_str = std::to_string(length / block_size);
- if (length != 0) {
- mke2fs_argv[i++] = size_str.c_str();
- }
-
- result = exec_cmd(mke2fs_argv[0], const_cast<char**>(mke2fs_argv));
- if (result == 0 && directory != nullptr) {
- const char* e2fsdroid_argv[] = { "/sbin/e2fsdroid_static",
- "-e",
- "-f",
- directory,
- "-a",
- volume,
- v->blk_device,
- nullptr };
-
- result = exec_cmd(e2fsdroid_argv[0], const_cast<char**>(e2fsdroid_argv));
- }
- } else { /* Has to be f2fs because we checked earlier. */
- char *num_sectors = nullptr;
- if (length >= 512 && asprintf(&num_sectors, "%zd", length / 512) <= 0) {
- LOG(ERROR) << "format_volume: failed to create " << v->fs_type
- << " command for " << v->blk_device;
- return -1;
- }
- const char *f2fs_path = "/sbin/mkfs.f2fs";
- const char* const f2fs_argv[] = {"mkfs.f2fs", "-t", "-d1", v->blk_device, num_sectors, nullptr};
-
- result = exec_cmd(f2fs_path, (char* const*)f2fs_argv);
- free(num_sectors);
- }
- if (result != 0) {
- PLOG(ERROR) << "format_volume: make " << v->fs_type << " failed on " << v->blk_device;
- return -1;
- }
- return 0;
- }
-
+ const Volume* v = volume_for_path(volume);
+ if (v == nullptr) {
+ LOG(ERROR) << "unknown volume \"" << volume << "\"";
+ return -1;
+ }
+ if (strcmp(v->fs_type, "ramdisk") == 0) {
+ LOG(ERROR) << "can't format_volume \"" << volume << "\"";
+ return -1;
+ }
+ if (strcmp(v->mount_point, volume) != 0) {
+ LOG(ERROR) << "can't give path \"" << volume << "\" to format_volume";
+ return -1;
+ }
+ if (ensure_path_unmounted(volume) != 0) {
+ LOG(ERROR) << "format_volume: Failed to unmount \"" << v->mount_point << "\"";
+ return -1;
+ }
+ if (strcmp(v->fs_type, "ext4") != 0 && strcmp(v->fs_type, "f2fs") != 0) {
LOG(ERROR) << "format_volume: fs_type \"" << v->fs_type << "\" unsupported";
return -1;
+ }
+
+ // If there's a key_loc that looks like a path, it should be a block device for storing encryption
+ // metadata. Wipe it too.
+ if (v->key_loc != nullptr && v->key_loc[0] == '/') {
+ LOG(INFO) << "Wiping " << v->key_loc;
+ int fd = open(v->key_loc, O_WRONLY | O_CREAT, 0644);
+ if (fd == -1) {
+ PLOG(ERROR) << "format_volume: Failed to open " << v->key_loc;
+ return -1;
+ }
+ wipe_block_device(fd, get_file_size(fd));
+ close(fd);
+ }
+
+ ssize_t length = 0;
+ if (v->length != 0) {
+ length = v->length;
+ } else if (v->key_loc != nullptr && strcmp(v->key_loc, "footer") == 0) {
+ android::base::unique_fd fd(open(v->blk_device, O_RDONLY));
+ if (fd == -1) {
+ PLOG(ERROR) << "get_file_size: failed to open " << v->blk_device;
+ return -1;
+ }
+ length = get_file_size(fd.get(), CRYPT_FOOTER_OFFSET);
+ if (length <= 0) {
+ LOG(ERROR) << "get_file_size: invalid size " << length << " for " << v->blk_device;
+ return -1;
+ }
+ }
+
+ if (strcmp(v->fs_type, "ext4") == 0) {
+ static constexpr int kBlockSize = 4096;
+ std::vector<std::string> mke2fs_args = {
+ "/sbin/mke2fs_static", "-F", "-t", "ext4", "-b", std::to_string(kBlockSize),
+ };
+
+ int raid_stride = v->logical_blk_size / kBlockSize;
+ int raid_stripe_width = v->erase_blk_size / kBlockSize;
+ // stride should be the max of 8KB and logical block size
+ if (v->logical_blk_size != 0 && v->logical_blk_size < 8192) {
+ raid_stride = 8192 / kBlockSize;
+ }
+ if (v->erase_blk_size != 0 && v->logical_blk_size != 0) {
+ mke2fs_args.push_back("-E");
+ mke2fs_args.push_back(
+ android::base::StringPrintf("stride=%d,stripe-width=%d", raid_stride, raid_stripe_width));
+ }
+ mke2fs_args.push_back(v->blk_device);
+ if (length != 0) {
+ mke2fs_args.push_back(std::to_string(length / kBlockSize));
+ }
+
+ int result = exec_cmd(mke2fs_args);
+ if (result == 0 && directory != nullptr) {
+ std::vector<std::string> e2fsdroid_args = {
+ "/sbin/e2fsdroid_static",
+ "-e",
+ "-f",
+ directory,
+ "-a",
+ volume,
+ v->blk_device,
+ };
+ result = exec_cmd(e2fsdroid_args);
+ }
+
+ if (result != 0) {
+ PLOG(ERROR) << "format_volume: Failed to make ext4 on " << v->blk_device;
+ return -1;
+ }
+ return 0;
+ }
+
+ // Has to be f2fs because we checked earlier.
+ std::vector<std::string> f2fs_args = { "/sbin/mkfs.f2fs", "-t", "-d1", v->blk_device };
+ if (length >= 512) {
+ f2fs_args.push_back(std::to_string(length / 512));
+ }
+
+ int result = exec_cmd(f2fs_args);
+ if (result != 0) {
+ PLOG(ERROR) << "format_volume: Failed to make f2fs on " << v->blk_device;
+ return -1;
+ }
+ return 0;
}
int format_volume(const char* volume) {
- return format_volume(volume, NULL);
+ return format_volume(volume, nullptr);
}
int setup_install_mounts() {
@@ -339,12 +323,12 @@
if (strcmp(v->mount_point, "/tmp") == 0 || strcmp(v->mount_point, "/cache") == 0) {
if (ensure_path_mounted(v->mount_point) != 0) {
- LOG(ERROR) << "failed to mount " << v->mount_point;
+ LOG(ERROR) << "Failed to mount " << v->mount_point;
return -1;
}
} else {
if (ensure_path_unmounted(v->mount_point) != 0) {
- LOG(ERROR) << "failed to unmount " << v->mount_point;
+ LOG(ERROR) << "Failed to unmount " << v->mount_point;
return -1;
}
}
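The roots.cpp changes above also rework exec_cmd() to take the argument list as a std::vector<std::string> and build the argv array internally. A minimal call-site sketch (not part of the patch); the block device path is a placeholder.

    // Sketch only: run a mkfs.f2fs command through the new exec_cmd() interface.
    std::vector<std::string> mkfs_args = {
      "/sbin/mkfs.f2fs", "-t", "-d1", "/dev/block/by-name/example",  // hypothetical device
    };
    if (exec_cmd(mkfs_args) != 0) {
      LOG(ERROR) << "mkfs.f2fs failed on /dev/block/by-name/example";
    }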
diff --git a/tests/unit/dirutil_test.cpp b/tests/unit/dirutil_test.cpp
index 5e2ae4f..7f85d13 100644
--- a/tests/unit/dirutil_test.cpp
+++ b/tests/unit/dirutil_test.cpp
@@ -26,23 +26,23 @@
TEST(DirUtilTest, create_invalid) {
// Requesting to create an empty dir is invalid.
- ASSERT_EQ(-1, dirCreateHierarchy("", 0755, nullptr, false, nullptr));
+ ASSERT_EQ(-1, mkdir_recursively("", 0755, false, nullptr));
ASSERT_EQ(ENOENT, errno);
// Requesting to strip the name with no slash present.
- ASSERT_EQ(-1, dirCreateHierarchy("abc", 0755, nullptr, true, nullptr));
+ ASSERT_EQ(-1, mkdir_recursively("abc", 0755, true, nullptr));
ASSERT_EQ(ENOENT, errno);
// Creating a dir that already exists.
TemporaryDir td;
- ASSERT_EQ(0, dirCreateHierarchy(td.path, 0755, nullptr, false, nullptr));
+ ASSERT_EQ(0, mkdir_recursively(td.path, 0755, false, nullptr));
// "///" is a valid dir.
- ASSERT_EQ(0, dirCreateHierarchy("///", 0755, nullptr, false, nullptr));
+ ASSERT_EQ(0, mkdir_recursively("///", 0755, false, nullptr));
// Request to create a dir, but a file with the same name already exists.
TemporaryFile tf;
- ASSERT_EQ(-1, dirCreateHierarchy(tf.path, 0755, nullptr, false, nullptr));
+ ASSERT_EQ(-1, mkdir_recursively(tf.path, 0755, false, nullptr));
ASSERT_EQ(ENOTDIR, errno);
}
@@ -51,7 +51,7 @@
std::string prefix(td.path);
std::string path = prefix + "/a/b";
constexpr mode_t mode = 0755;
- ASSERT_EQ(0, dirCreateHierarchy(path.c_str(), mode, nullptr, false, nullptr));
+ ASSERT_EQ(0, mkdir_recursively(path, mode, false, nullptr));
// Verify.
struct stat sb;
@@ -69,7 +69,7 @@
TemporaryDir td;
std::string prefix(td.path);
std::string path = prefix + "/a/b";
- ASSERT_EQ(0, dirCreateHierarchy(path.c_str(), 0755, nullptr, true, nullptr));
+ ASSERT_EQ(0, mkdir_recursively(path, 0755, true, nullptr));
// Verify that "../a" exists but not "../a/b".
struct stat sb;
@@ -83,31 +83,21 @@
ASSERT_EQ(0, rmdir((prefix + "/a").c_str()));
}
-TEST(DirUtilTest, create_mode_and_timestamp) {
+TEST(DirUtilTest, create_mode) {
TemporaryDir td;
std::string prefix(td.path);
std::string path = prefix + "/a/b";
- // Set the timestamp to 8/1/2008.
- constexpr struct utimbuf timestamp = { 1217592000, 1217592000 };
constexpr mode_t mode = 0751;
- ASSERT_EQ(0, dirCreateHierarchy(path.c_str(), mode, ×tamp, false, nullptr));
+ ASSERT_EQ(0, mkdir_recursively(path, mode, false, nullptr));
- // Verify the mode and timestamp for "../a/b".
+ // Verify the mode for "../a/b".
struct stat sb;
ASSERT_EQ(0, stat(path.c_str(), &sb)) << strerror(errno);
ASSERT_TRUE(S_ISDIR(sb.st_mode));
constexpr mode_t mask = S_IRWXU | S_IRWXG | S_IRWXO;
ASSERT_EQ(mode, sb.st_mode & mask);
- timespec time;
- time.tv_sec = 1217592000;
- time.tv_nsec = 0;
-
- ASSERT_EQ(time.tv_sec, static_cast<long>(sb.st_atime));
- ASSERT_EQ(time.tv_sec, static_cast<long>(sb.st_mtime));
-
- // Verify the mode for "../a". Note that the timestamp for intermediate directories (e.g. "../a")
- // may not be 'timestamp' according to the current implementation.
+ // Verify the mode for "../a".
ASSERT_EQ(0, stat((prefix + "/a").c_str(), &sb)) << strerror(errno);
ASSERT_TRUE(S_ISDIR(sb.st_mode));
ASSERT_EQ(mode, sb.st_mode & mask);
@@ -116,35 +106,3 @@
ASSERT_EQ(0, rmdir((prefix + "/a/b").c_str()));
ASSERT_EQ(0, rmdir((prefix + "/a").c_str()));
}
-
-TEST(DirUtilTest, unlink_invalid) {
- // File doesn't exist.
- ASSERT_EQ(-1, dirUnlinkHierarchy("doesntexist"));
-
- // Nonexistent directory.
- TemporaryDir td;
- std::string path(td.path);
- ASSERT_EQ(-1, dirUnlinkHierarchy((path + "/a").c_str()));
- ASSERT_EQ(ENOENT, errno);
-}
-
-TEST(DirUtilTest, unlink_smoke) {
- // Unlink a file.
- TemporaryFile tf;
- ASSERT_EQ(0, dirUnlinkHierarchy(tf.path));
- ASSERT_EQ(-1, access(tf.path, F_OK));
-
- TemporaryDir td;
- std::string path(td.path);
- constexpr mode_t mode = 0700;
- ASSERT_EQ(0, mkdir((path + "/a").c_str(), mode));
- ASSERT_EQ(0, mkdir((path + "/a/b").c_str(), mode));
- ASSERT_EQ(0, mkdir((path + "/a/b/c").c_str(), mode));
- ASSERT_EQ(0, mkdir((path + "/a/d").c_str(), mode));
-
- // Remove "../a" recursively.
- ASSERT_EQ(0, dirUnlinkHierarchy((path + "/a").c_str()));
-
- // Verify it's gone.
- ASSERT_EQ(-1, access((path + "/a").c_str(), F_OK));
-}
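The updated tests above reflect the new mkdir_recursively() signature: a path, a mode, whether to strip the trailing file name, and an selabel handle. A small usage sketch (not part of the patch); the target path is a placeholder.

    // Sketch only: create all parent directories for a file we are about to write.
    std::string target = "/cache/recovery/example/last_log";  // hypothetical file path
    if (mkdir_recursively(target, 0777, true /* strip the file name */, sehandle) != 0) {
      PLOG(ERROR) << "Failed to create parent directories for " << target;
    }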
diff --git a/tests/unit/rangeset_test.cpp b/tests/unit/rangeset_test.cpp
index 3c6d77e..3993cb9 100644
--- a/tests/unit/rangeset_test.cpp
+++ b/tests/unit/rangeset_test.cpp
@@ -110,3 +110,50 @@
}
ASSERT_EQ((std::vector<Range>{ Range{ 8, 10 }, Range{ 1, 5 } }), ranges);
}
+
+TEST(RangeSetTest, tostring) {
+ ASSERT_EQ("2,1,6", RangeSet::Parse("2,1,6").ToString());
+ ASSERT_EQ("4,1,5,8,10", RangeSet::Parse("4,1,5,8,10").ToString());
+ ASSERT_EQ("6,1,3,4,6,15,22", RangeSet::Parse("6,1,3,4,6,15,22").ToString());
+}
+
+TEST(SortedRangeSetTest, insertion) {
+ SortedRangeSet rs({ { 2, 3 }, { 4, 6 }, { 8, 14 } });
+ rs.Insert({ 1, 2 });
+ ASSERT_EQ(SortedRangeSet({ { 1, 3 }, { 4, 6 }, { 8, 14 } }), rs);
+ ASSERT_EQ(static_cast<size_t>(10), rs.blocks());
+ rs.Insert({ 3, 5 });
+ ASSERT_EQ(SortedRangeSet({ { 1, 6 }, { 8, 14 } }), rs);
+ ASSERT_EQ(static_cast<size_t>(11), rs.blocks());
+
+ SortedRangeSet r1({ { 20, 22 }, { 15, 18 } });
+ rs.Insert(r1);
+ ASSERT_EQ(SortedRangeSet({ { 1, 6 }, { 8, 14 }, { 15, 18 }, { 20, 22 } }), rs);
+ ASSERT_EQ(static_cast<size_t>(16), rs.blocks());
+
+ SortedRangeSet r2({ { 2, 7 }, { 15, 21 }, { 20, 25 } });
+ rs.Insert(r2);
+ ASSERT_EQ(SortedRangeSet({ { 1, 7 }, { 8, 14 }, { 15, 25 } }), rs);
+ ASSERT_EQ(static_cast<size_t>(22), rs.blocks());
+}
+
+TEST(SortedRangeSetTest, file_range) {
+ SortedRangeSet rs;
+ rs.Insert(4096, 4096);
+ ASSERT_EQ(SortedRangeSet({ { 1, 2 } }), rs);
+ // insert block 2-9
+ rs.Insert(4096 * 3 - 1, 4096 * 7);
+ ASSERT_EQ(SortedRangeSet({ { 1, 10 } }), rs);
+ // insert block 15-19
+ rs.Insert(4096 * 15 + 1, 4096 * 4);
+ ASSERT_EQ(SortedRangeSet({ { 1, 10 }, { 15, 20 } }), rs);
+
+ // The queried byte range [8191, 8201) spans blocks 1-2, both of which overlap with rs.
+ ASSERT_TRUE(rs.Overlaps(4096 * 2 - 1, 10));
+ ASSERT_FALSE(rs.Overlaps(4096 * 10, 4096 * 5));
+
+ ASSERT_EQ(static_cast<size_t>(10), rs.GetOffsetInRangeSet(4106));
+ ASSERT_EQ(static_cast<size_t>(40970), rs.GetOffsetInRangeSet(4096 * 16 + 10));
+ // block#10 not in range.
+ ASSERT_EXIT(rs.GetOffsetInRangeSet(40970), ::testing::KilledBySignal(SIGABRT), "");
+}
\ No newline at end of file
diff --git a/tools/recovery_l10n/res/values-en-rXC/strings.xml b/tools/recovery_l10n/res/values-en-rXC/strings.xml
deleted file mode 100644
index 2d528b3..0000000
--- a/tools/recovery_l10n/res/values-en-rXC/strings.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<resources xmlns:android="http://schemas.android.com/apk/res/android"
- xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
- <string name="recovery_installing" msgid="2013591905463558223">"Installing system update"</string>
- <string name="recovery_erasing" msgid="7334826894904037088">"Erasing"</string>
- <string name="recovery_no_command" msgid="4465476568623024327">"No command"</string>
- <string name="recovery_error" msgid="5748178989622716736">"Error!"</string>
- <string name="recovery_installing_security" msgid="9184031299717114342">"Installing security update"</string>
-</resources>
diff --git a/uncrypt/Android.mk b/uncrypt/Android.mk
index 59084b0..cb60c72 100644
--- a/uncrypt/Android.mk
+++ b/uncrypt/Android.mk
@@ -16,7 +16,6 @@
include $(CLEAR_VARS)
-LOCAL_CLANG := true
LOCAL_SRC_FILES := uncrypt.cpp
LOCAL_C_INCLUDES := $(LOCAL_PATH)/..
LOCAL_MODULE := uncrypt
diff --git a/uncrypt/uncrypt.cpp b/uncrypt/uncrypt.cpp
index ad3bdce..7a2ccbc 100644
--- a/uncrypt/uncrypt.cpp
+++ b/uncrypt/uncrypt.cpp
@@ -448,20 +448,20 @@
static int uncrypt(const char* input_path, const char* map_file, const int socket) {
LOG(INFO) << "update package is \"" << input_path << "\"";
- // Turn the name of the file we're supposed to convert into an
- // absolute path, so we can find what filesystem it's on.
+ // Turn the name of the file we're supposed to convert into an absolute path, so we can find
+ // what filesystem it's on.
char path[PATH_MAX+1];
- if (realpath(input_path, path) == NULL) {
+ if (realpath(input_path, path) == nullptr) {
PLOG(ERROR) << "failed to convert \"" << input_path << "\" to absolute path";
- return 1;
+ return kUncryptRealpathFindError;
}
bool encryptable;
bool encrypted;
const char* blk_dev = find_block_device(path, &encryptable, &encrypted);
- if (blk_dev == NULL) {
+ if (blk_dev == nullptr) {
LOG(ERROR) << "failed to find block device for " << path;
- return 1;
+ return kUncryptBlockDeviceFindError;
}
// If the filesystem it's on isn't encrypted, we only produce the
diff --git a/update_verifier/update_verifier.cpp b/update_verifier/update_verifier.cpp
index ceb3ec9..faebbed 100644
--- a/update_verifier/update_verifier.cpp
+++ b/update_verifier/update_verifier.cpp
@@ -272,23 +272,36 @@
// The current slot has not booted successfully.
#if defined(PRODUCT_SUPPORTS_VERITY) || defined(BOARD_AVB_ENABLE)
+ bool skip_verification = false;
std::string verity_mode = android::base::GetProperty("ro.boot.veritymode", "");
if (verity_mode.empty()) {
+ // With AVB it's possible to disable verification entirely, in which case
+ // ro.boot.veritymode is empty.
+#if defined(BOARD_AVB_ENABLE)
+ LOG(WARNING) << "verification has been disabled; marking without verification.";
+ skip_verification = true;
+#else
LOG(ERROR) << "Failed to get dm-verity mode.";
return reboot_device();
+#endif
} else if (android::base::EqualsIgnoreCase(verity_mode, "eio")) {
// We shouldn't see verity in EIO mode if the current slot hasn't booted successfully before.
// Continue the verification until we fail to read some blocks.
LOG(WARNING) << "Found dm-verity in EIO mode.";
+ } else if (android::base::EqualsIgnoreCase(verity_mode, "disabled")) {
+ LOG(WARNING) << "dm-verity in disabled mode; marking without verification.";
+ skip_verification = true;
} else if (verity_mode != "enforcing") {
LOG(ERROR) << "Unexpected dm-verity mode : " << verity_mode << ", expecting enforcing.";
return reboot_device();
}
- static constexpr auto CARE_MAP_FILE = "/data/ota_package/care_map.txt";
- if (!verify_image(CARE_MAP_FILE)) {
- LOG(ERROR) << "Failed to verify all blocks in care map file.";
- return reboot_device();
+ if (!skip_verification) {
+ static constexpr auto CARE_MAP_FILE = "/data/ota_package/care_map.txt";
+ if (!verify_image(CARE_MAP_FILE)) {
+ LOG(ERROR) << "Failed to verify all blocks in care map file.";
+ return reboot_device();
+ }
}
#else
LOG(WARNING) << "dm-verity not enabled; marking without verification.";
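The update_verifier hunk above decides whether block verification can be skipped based on ro.boot.veritymode. A standalone sketch of that decision (not part of the patch): the compile-time BOARD_AVB_ENABLE switch becomes a parameter here, and the reboot paths for unexpected modes are omitted.

    // Sketch only: returns true when the care map verification can be skipped.
    bool CanSkipVerification(bool avb_enabled) {
      std::string verity_mode = android::base::GetProperty("ro.boot.veritymode", "");
      if (verity_mode.empty()) {
        // With AVB, verification may be disabled entirely, leaving the property unset.
        return avb_enabled;
      }
      return android::base::EqualsIgnoreCase(verity_mode, "disabled");
    }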
diff --git a/updater/include/updater/rangeset.h b/updater/include/updater/rangeset.h
index fad0380..b67c987 100644
--- a/updater/include/updater/rangeset.h
+++ b/updater/include/updater/rangeset.h
@@ -24,6 +24,7 @@
#include <android-base/logging.h>
#include <android-base/parseint.h>
+#include <android-base/stringprintf.h>
#include <android-base/strings.h>
using Range = std::pair<size_t, size_t>;
@@ -74,6 +75,18 @@
return RangeSet(std::move(pairs));
}
+ std::string ToString() const {
+ if (ranges_.empty()) {
+ return "";
+ }
+ std::string result = std::to_string(ranges_.size() * 2);
+ for (const auto& r : ranges_) {
+ result += android::base::StringPrintf(",%zu,%zu", r.first, r.second);
+ }
+
+ return result;
+ }
+
// Get the block number for the i-th (starting from 0) block in the RangeSet.
size_t GetBlockNumber(size_t idx) const {
CHECK_LT(idx, blocks_) << "Out of bound index " << idx << " (total blocks: " << blocks_ << ")";
@@ -157,8 +170,109 @@
return ranges_ != other.ranges_;
}
- private:
+ protected:
// Actual limit for each value and the total number are both INT_MAX.
std::vector<Range> ranges_;
size_t blocks_;
};
+
+static constexpr size_t kBlockSize = 4096;
+
+// This class is a sorted version of RangeSet. It's useful in imgdiff to split the input file
+// when we're handling large zip files. Specifically, we can treat the input file as a
+// continuous RangeSet (i.e. RangeSet("0-99") for a 100-block file), and break it down into
+// several smaller chunks based on the zip entries.
+
+// For example, [source: 0-99] can be split into
+// [split_src1: 10-29]; [split_src2: 40-49, 60-69]; [split_src3: 70-89]
+// Here "10-29" simply means the 10th block through the 29th block of the original input file.
+// Also, note that the split sources should be mutually exclusive, but they don't need to cover
+// every block in the original source.
+class SortedRangeSet : public RangeSet {
+ public:
+ SortedRangeSet() {}
+
+ // Ranges in the set should be mutually exclusive, and they're sorted by the start block.
+ explicit SortedRangeSet(std::vector<Range>&& pairs) : RangeSet(std::move(pairs)) {
+ std::sort(ranges_.begin(), ranges_.end());
+ }
+
+ void Insert(const Range& to_insert) {
+ SortedRangeSet rs({ to_insert });
+ Insert(rs);
+ }
+
+ // Insert the input SortedRangeSet; keep the ranges sorted and merge any overlapping ranges.
+ void Insert(const SortedRangeSet& rs) {
+ if (rs.size() == 0) {
+ return;
+ }
+ // Merge and sort the two RangeSets.
+ std::vector<Range> temp = std::move(ranges_);
+ std::copy(rs.begin(), rs.end(), std::back_inserter(temp));
+ std::sort(temp.begin(), temp.end());
+
+ Clear();
+ // Trim overlaps and insert the result back to ranges_.
+ Range to_insert = temp.front();
+ for (auto it = temp.cbegin() + 1; it != temp.cend(); it++) {
+ if (it->first <= to_insert.second) {
+ to_insert.second = std::max(to_insert.second, it->second);
+ } else {
+ ranges_.push_back(to_insert);
+ blocks_ += (to_insert.second - to_insert.first);
+ to_insert = *it;
+ }
+ }
+ ranges_.push_back(to_insert);
+ blocks_ += (to_insert.second - to_insert.first);
+ }
+
+ void Clear() {
+ blocks_ = 0;
+ ranges_.clear();
+ }
+
+ using RangeSet::Overlaps;
+ bool Overlaps(size_t start, size_t len) const {
+ RangeSet rs({ { start / kBlockSize, (start + len - 1) / kBlockSize + 1 } });
+ return Overlaps(rs);
+ }
+
+ // Compute the block range the file occupies, and insert that range.
+ void Insert(size_t start, size_t len) {
+ Range to_insert{ start / kBlockSize, (start + len - 1) / kBlockSize + 1 };
+ Insert(to_insert);
+ }
+
+ // Given an offset of the file, checks if the corresponding block (by considering the file as
+ // 0-based continuous block ranges) is covered by the SortedRangeSet. If so, returns the offset
+ // within this SortedRangeSet.
+ //
+ // For example, the 4106-th byte of a file falls in block 1, assuming a 4096-byte block size.
+ // The mapped offset within a SortedRangeSet("1-9 15-19") is 10.
+ //
+ // An offset of 65546 falls into the 16-th block in a file. Block 16 is contained as the 10-th
+ // item in SortedRangeSet("1-9 15-19"). So its data can be found at offset 40970 (i.e. 4096 * 10
+ // + 10) in a range represented by this SortedRangeSet.
+ size_t GetOffsetInRangeSet(size_t old_offset) const {
+ size_t old_block_start = old_offset / kBlockSize;
+ size_t new_block_start = 0;
+ for (const auto& range : ranges_) {
+ // Find the index of old_block_start.
+ if (old_block_start >= range.second) {
+ new_block_start += (range.second - range.first);
+ } else if (old_block_start >= range.first) {
+ new_block_start += (old_block_start - range.first);
+ return (new_block_start * kBlockSize + old_offset % kBlockSize);
+ } else {
+ CHECK(false) << "block_start " << old_block_start << " is missing between two ranges: "
+ << this->ToString();
+ return 0;
+ }
+ }
+ CHECK(false) << "block_start " << old_block_start << " exceeds the limit of current RangeSet: "
+ << this->ToString();
+ return 0;
+ }
+};
\ No newline at end of file
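To make the GetOffsetInRangeSet() comment above concrete, here is a short usage sketch (not part of the patch); the values mirror the worked example and the new rangeset_test.cpp cases.

    // Sketch only: "1-9 15-19" corresponds to the block ranges [1, 10) and [15, 20).
    SortedRangeSet rs({ { 1, 10 }, { 15, 20 } });
    CHECK_EQ(static_cast<size_t>(10), rs.GetOffsetInRangeSet(4106));               // block 1, byte 10
    CHECK_EQ(static_cast<size_t>(40970), rs.GetOffsetInRangeSet(4096 * 16 + 10));  // block 16, byte 10
    rs.Insert(4096 * 20, 4096);  // covers block 20 only; extends the last range to [15, 21)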
diff --git a/updater/install.cpp b/updater/install.cpp
index bfe91e7..8e54c2e 100644
--- a/updater/install.cpp
+++ b/updater/install.cpp
@@ -95,34 +95,6 @@
uiPrint(state, error_msg);
}
-static bool is_dir(const std::string& dirpath) {
- struct stat st;
- return stat(dirpath.c_str(), &st) == 0 && S_ISDIR(st.st_mode);
-}
-
-// Create all parent directories of name, if necessary.
-static bool make_parents(const std::string& name) {
- size_t prev_end = 0;
- while (prev_end < name.size()) {
- size_t next_end = name.find('/', prev_end + 1);
- if (next_end == std::string::npos) {
- break;
- }
- std::string dir_path = name.substr(0, next_end);
- if (!is_dir(dir_path)) {
- int result = mkdir(dir_path.c_str(), 0700);
- if (result != 0) {
- PLOG(ERROR) << "failed to mkdir " << dir_path << " when make parents for " << name;
- return false;
- }
-
- LOG(INFO) << "created [" << dir_path << "]";
- }
- prev_end = next_end;
- }
- return true;
-}
-
// mount(fs_type, partition_type, location, mount_point)
// mount(fs_type, partition_type, location, mount_point, mount_options)