ui: Manage menu_ with std::vector.
am: 17fa5c7cab
Change-Id: Ifa4f8333a445bdf92a9cc19dacd2223260dcff45
diff --git a/Android.mk b/Android.mk
index 776e6ea..8011412 100644
--- a/Android.mk
+++ b/Android.mk
@@ -93,7 +93,7 @@
endif
LOCAL_CFLAGS += -DRECOVERY_API_VERSION=$(RECOVERY_API_VERSION)
-LOCAL_CFLAGS += -Wno-unused-parameter -Werror
+LOCAL_CFLAGS += -Wall -Wno-unused-parameter -Werror
ifneq ($(TARGET_RECOVERY_UI_MARGIN_HEIGHT),)
LOCAL_CFLAGS += -DRECOVERY_UI_MARGIN_HEIGHT=$(TARGET_RECOVERY_UI_MARGIN_HEIGHT)
@@ -159,6 +159,7 @@
libmounts \
libz \
libminadbd \
+ libasyncio \
libfusesideload \
libminui \
libpng \
@@ -203,7 +204,7 @@
rotate_logs.cpp
LOCAL_MODULE := recovery-persist
LOCAL_SHARED_LIBRARIES := liblog libbase
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_INIT_RC := recovery-persist.rc
include $(BUILD_EXECUTABLE)
@@ -215,7 +216,7 @@
rotate_logs.cpp
LOCAL_MODULE := recovery-refresh
LOCAL_SHARED_LIBRARIES := liblog libbase
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_INIT_RC := recovery-refresh.rc
include $(BUILD_EXECUTABLE)
@@ -230,13 +231,14 @@
libcrypto_utils \
libcrypto \
libbase
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
include $(BUILD_STATIC_LIBRARY)
# Wear default device
# ===============================
include $(CLEAR_VARS)
LOCAL_SRC_FILES := wear_device.cpp
+LOCAL_CFLAGS := -Wall -Werror
# Should match TARGET_RECOVERY_UI_LIB in BoardConfig.mk.
LOCAL_MODULE := librecovery_ui_wear
@@ -248,6 +250,7 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES := vr_device.cpp
+LOCAL_CFLAGS := -Wall -Werror
# should match TARGET_RECOVERY_UI_LIB set in BoardConfig.mk
LOCAL_MODULE := librecovery_ui_vr
diff --git a/applypatch/Android.mk b/applypatch/Android.mk
index a7412d2..59aa0ce 100644
--- a/applypatch/Android.mk
+++ b/applypatch/Android.mk
@@ -37,6 +37,7 @@
libz
LOCAL_CFLAGS := \
-DZLIB_CONST \
+ -Wall \
-Werror
include $(BUILD_STATIC_LIBRARY)
@@ -59,6 +60,7 @@
libz
LOCAL_CFLAGS := \
-DZLIB_CONST \
+ -Wall \
-Werror
include $(BUILD_STATIC_LIBRARY)
@@ -82,6 +84,7 @@
libz
LOCAL_CFLAGS := \
-DZLIB_CONST \
+ -Wall \
-Werror
include $(BUILD_HOST_STATIC_LIBRARY)
@@ -97,7 +100,7 @@
libbase \
libedify \
libcrypto
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
include $(BUILD_STATIC_LIBRARY)
# applypatch (target executable)
@@ -119,15 +122,17 @@
libbase \
libz \
libcutils
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
include $(BUILD_EXECUTABLE)
libimgdiff_src_files := imgdiff.cpp
# libbsdiff is compiled with -D_FILE_OFFSET_BITS=64.
libimgdiff_cflags := \
+ -Wall \
-Werror \
- -D_FILE_OFFSET_BITS=64
+ -D_FILE_OFFSET_BITS=64 \
+ -DZLIB_CONST
libimgdiff_static_libraries := \
libbsdiff \
@@ -150,7 +155,8 @@
LOCAL_STATIC_LIBRARIES := \
$(libimgdiff_static_libraries)
LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/include
+ $(LOCAL_PATH)/include \
+ bootable/recovery
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
include $(BUILD_STATIC_LIBRARY)
@@ -165,7 +171,8 @@
LOCAL_STATIC_LIBRARIES := \
$(libimgdiff_static_libraries)
LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/include
+ $(LOCAL_PATH)/include \
+ bootable/recovery
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
include $(BUILD_HOST_STATIC_LIBRARY)
@@ -174,9 +181,12 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES := imgdiff_main.cpp
LOCAL_MODULE := imgdiff
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_STATIC_LIBRARIES := \
libimgdiff \
$(libimgdiff_static_libraries) \
libbz
+LOCAL_C_INCLUDES := \
+ $(LOCAL_PATH)/include \
+ bootable/recovery
include $(BUILD_HOST_EXECUTABLE)
diff --git a/applypatch/imgdiff.cpp b/applypatch/imgdiff.cpp
index fc24064..2eb618f 100644
--- a/applypatch/imgdiff.cpp
+++ b/applypatch/imgdiff.cpp
@@ -125,6 +125,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -139,15 +140,19 @@
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/memory.h>
+#include <android-base/parseint.h>
#include <android-base/unique_fd.h>
-#include <ziparchive/zip_archive.h>
-
#include <bsdiff.h>
+#include <ziparchive/zip_archive.h>
#include <zlib.h>
+#include "applypatch/imgdiff_image.h"
+#include "rangeset.h"
+
using android::base::get_unaligned;
-static constexpr auto BUFFER_SIZE = 0x8000;
+static constexpr size_t BLOCK_SIZE = 4096;
+static constexpr size_t BUFFER_SIZE = 0x8000;
// If we use this function to write the offset and length (type size_t), their values should not
// exceed 2^63; because the signed bit will be casted away.
@@ -161,99 +166,78 @@
return android::base::WriteFully(fd, &value, sizeof(int32_t));
}
-class ImageChunk {
- public:
- static constexpr auto WINDOWBITS = -15; // 32kb window; negative to indicate a raw stream.
- static constexpr auto MEMLEVEL = 8; // the default value.
- static constexpr auto METHOD = Z_DEFLATED;
- static constexpr auto STRATEGY = Z_DEFAULT_STRATEGY;
+// Trim the head or tail to align with the block size. Return false if the chunk has nothing left
+// after alignment.
+static bool AlignHead(size_t* start, size_t* length) {
+ size_t residual = (*start % BLOCK_SIZE == 0) ? 0 : BLOCK_SIZE - *start % BLOCK_SIZE;
- ImageChunk(int type, size_t start, const std::vector<uint8_t>* file_content, size_t raw_data_len)
- : type_(type),
- start_(start),
- input_file_ptr_(file_content),
- raw_data_len_(raw_data_len),
- compress_level_(6),
- source_start_(0),
- source_len_(0),
- source_uncompressed_len_(0) {
- CHECK(file_content != nullptr) << "input file container can't be nullptr";
+ if (*length <= residual) {
+ *length = 0;
+ return false;
}
- int GetType() const {
- return type_;
- }
- size_t GetRawDataLength() const {
- return raw_data_len_;
- }
- const std::string& GetEntryName() const {
- return entry_name_;
+ // Trim the data in the beginning.
+ *start += residual;
+ *length -= residual;
+ return true;
+}
+
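+// Trim the tail so that (start + length) ends on a block boundary. Return false if the chunk has
+// nothing left after alignment.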
+static bool AlignTail(size_t* start, size_t* length) {
+ size_t residual = (*start + *length) % BLOCK_SIZE;
+ if (*length <= residual) {
+ *length = 0;
+ return false;
}
- // CHUNK_DEFLATE will return the uncompressed data for diff, while other types will simply return
- // the raw data.
- const uint8_t * DataForPatch() const;
- size_t DataLengthForPatch() const;
+ // Trim the data in the end.
+ *length -= residual;
+ return true;
+}
- void Dump() const {
- printf("type %d start %zu len %zu\n", type_, start_, DataLengthForPatch());
+// Remove the used blocks from the source chunk to make sure the source ranges are mutually
+// exclusive after the split. Return false if we fail to get non-overlapping ranges; in that
+// case, we'll skip the entire source chunk.
+static bool RemoveUsedBlocks(size_t* start, size_t* length, const SortedRangeSet& used_ranges) {
+ if (!used_ranges.Overlaps(*start, *length)) {
+ return true;
}
- void SetSourceInfo(const ImageChunk& other);
- void SetEntryName(std::string entryname);
- void SetUncompressedData(std::vector<uint8_t> data);
- bool SetBonusData(const std::vector<uint8_t>& bonus_data);
+ // TODO: find the largest non-overlapping chunk.
+ printf("Removing block %s from %zu - %zu\n", used_ranges.ToString().c_str(), *start,
+ *start + *length - 1);
- bool operator==(const ImageChunk& other) const;
- bool operator!=(const ImageChunk& other) const {
- return !(*this == other);
+ // If there's no duplicate entry name, we should only overlap in the head or tail block. Try to
+ // trim both blocks. Skip this source chunk in case it still overlaps with the used ranges.
+ if (AlignHead(start, length) && !used_ranges.Overlaps(*start, *length)) {
+ return true;
+ }
+ if (AlignTail(start, length) && !used_ranges.Overlaps(*start, *length)) {
+ return true;
}
- size_t GetHeaderSize(size_t patch_size) const;
- // Return the offset of the next patch into the patch data.
- size_t WriteHeaderToFd(int fd, const std::vector<uint8_t>& patch, size_t offset);
+ printf("Failed to remove the overlapped block ranges; skip the source\n");
+ return false;
+}
- /*
- * Cause a gzip chunk to be treated as a normal chunk (ie, as a blob
- * of uninterpreted data). The resulting patch will likely be about
- * as big as the target file, but it lets us handle the case of images
- * where some gzip chunks are reconstructible but others aren't (by
- * treating the ones that aren't as normal chunks).
- */
- void ChangeDeflateChunkToNormal();
- bool ChangeChunkToRaw(size_t patch_size);
-
- /*
- * Verify that we can reproduce exactly the same compressed data that
- * we started with. Sets the level, method, windowBits, memLevel, and
- * strategy fields in the chunk to the encoding parameters needed to
- * produce the right output.
- */
- bool ReconstructDeflateChunk();
- bool IsAdjacentNormal(const ImageChunk& other) const;
- void MergeAdjacentNormal(const ImageChunk& other);
-
- private:
- int type_; // CHUNK_NORMAL, CHUNK_DEFLATE, CHUNK_RAW
- size_t start_; // offset of chunk in the original input file
- const std::vector<uint8_t>* input_file_ptr_; // ptr to the full content of original input file
- size_t raw_data_len_;
-
- // --- for CHUNK_DEFLATE chunks only: ---
- std::vector<uint8_t> uncompressed_data_;
- std::string entry_name_; // used for zip entries
-
- // deflate encoder parameters
- int compress_level_;
-
- size_t source_start_;
- size_t source_len_;
- size_t source_uncompressed_len_;
-
- const uint8_t* GetRawData() const;
- bool TryReconstruction(int level);
+static const struct option OPTIONS[] = {
+ { "zip-mode", no_argument, nullptr, 'z' },
+ { "bonus-file", required_argument, nullptr, 'b' },
+ { "block-limit", required_argument, nullptr, 0 },
+ { "debug-dir", required_argument, nullptr, 0 },
+ { nullptr, 0, nullptr, 0 },
};
+ImageChunk::ImageChunk(int type, size_t start, const std::vector<uint8_t>* file_content,
+ size_t raw_data_len, std::string entry_name)
+ : type_(type),
+ start_(start),
+ input_file_ptr_(file_content),
+ raw_data_len_(raw_data_len),
+ compress_level_(6),
+ entry_name_(std::move(entry_name)) {
+ CHECK(file_content != nullptr) << "input file container can't be nullptr";
+}
+
const uint8_t* ImageChunk::GetRawData() const {
CHECK_LE(start_ + raw_data_len_, input_file_ptr_->size());
return input_file_ptr_->data() + start_;
@@ -281,20 +265,6 @@
memcmp(GetRawData(), other.GetRawData(), raw_data_len_) == 0);
}
-void ImageChunk::SetSourceInfo(const ImageChunk& src) {
- source_start_ = src.start_;
- if (type_ == CHUNK_NORMAL) {
- source_len_ = src.raw_data_len_;
- } else if (type_ == CHUNK_DEFLATE) {
- source_len_ = src.raw_data_len_;
- source_uncompressed_len_ = src.uncompressed_data_.size();
- }
-}
-
-void ImageChunk::SetEntryName(std::string entryname) {
- entry_name_ = std::move(entryname);
-}
-
void ImageChunk::SetUncompressedData(std::vector<uint8_t> data) {
uncompressed_data_ = std::move(data);
}
@@ -307,80 +277,13 @@
return true;
}
-// Convert CHUNK_NORMAL & CHUNK_DEFLATE to CHUNK_RAW if the target size is
-// smaller. Also take the header size into account during size comparison.
-bool ImageChunk::ChangeChunkToRaw(size_t patch_size) {
- if (type_ == CHUNK_RAW) {
- return true;
- } else if (type_ == CHUNK_NORMAL && (raw_data_len_ <= 160 || raw_data_len_ < patch_size)) {
- type_ = CHUNK_RAW;
- return true;
- }
- return false;
-}
-
void ImageChunk::ChangeDeflateChunkToNormal() {
if (type_ != CHUNK_DEFLATE) return;
type_ = CHUNK_NORMAL;
- entry_name_.clear();
+ // No need to clear the entry name.
uncompressed_data_.clear();
}
-// Header size:
-// header_type 4 bytes
-// CHUNK_NORMAL 8*3 = 24 bytes
-// CHUNK_DEFLATE 8*5 + 4*5 = 60 bytes
-// CHUNK_RAW 4 bytes + patch_size
-size_t ImageChunk::GetHeaderSize(size_t patch_size) const {
- switch (type_) {
- case CHUNK_NORMAL:
- return 4 + 8 * 3;
- case CHUNK_DEFLATE:
- return 4 + 8 * 5 + 4 * 5;
- case CHUNK_RAW:
- return 4 + 4 + patch_size;
- default:
- CHECK(false) << "unexpected chunk type: " << type_; // Should not reach here.
- return 0;
- }
-}
-
-size_t ImageChunk::WriteHeaderToFd(int fd, const std::vector<uint8_t>& patch, size_t offset) {
- Write4(fd, type_);
- switch (type_) {
- case CHUNK_NORMAL:
- printf("normal (%10zu, %10zu) %10zu\n", start_, raw_data_len_, patch.size());
- Write8(fd, static_cast<int64_t>(source_start_));
- Write8(fd, static_cast<int64_t>(source_len_));
- Write8(fd, static_cast<int64_t>(offset));
- return offset + patch.size();
- case CHUNK_DEFLATE:
- printf("deflate (%10zu, %10zu) %10zu %s\n", start_, raw_data_len_, patch.size(),
- entry_name_.c_str());
- Write8(fd, static_cast<int64_t>(source_start_));
- Write8(fd, static_cast<int64_t>(source_len_));
- Write8(fd, static_cast<int64_t>(offset));
- Write8(fd, static_cast<int64_t>(source_uncompressed_len_));
- Write8(fd, static_cast<int64_t>(uncompressed_data_.size()));
- Write4(fd, compress_level_);
- Write4(fd, METHOD);
- Write4(fd, WINDOWBITS);
- Write4(fd, MEMLEVEL);
- Write4(fd, STRATEGY);
- return offset + patch.size();
- case CHUNK_RAW:
- printf("raw (%10zu, %10zu)\n", start_, raw_data_len_);
- Write4(fd, static_cast<int32_t>(patch.size()));
- if (!android::base::WriteFully(fd, patch.data(), patch.size())) {
- CHECK(false) << "failed to write " << patch.size() <<" bytes patch";
- }
- return offset;
- default:
- CHECK(false) << "unexpected chunk type: " << type_;
- return offset;
- }
-}
-
bool ImageChunk::IsAdjacentNormal(const ImageChunk& other) const {
if (type_ != CHUNK_NORMAL || other.type_ != CHUNK_NORMAL) {
return false;
@@ -393,14 +296,61 @@
raw_data_len_ = raw_data_len_ + other.raw_data_len_;
}
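+// Compute the bsdiff patch that changes |src| into |tgt|, and store the result in |patch_data|.
+// |bsdiff_cache| can be used to cache the suffix array if the same |src| chunk is used
+// repeatedly; pass nullptr if not needed.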
+bool ImageChunk::MakePatch(const ImageChunk& tgt, const ImageChunk& src,
+ std::vector<uint8_t>* patch_data, saidx_t** bsdiff_cache) {
+#if defined(__ANDROID__)
+ char ptemp[] = "/data/local/tmp/imgdiff-patch-XXXXXX";
+#else
+ char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
+#endif
+
+ int fd = mkstemp(ptemp);
+ if (fd == -1) {
+ printf("MakePatch failed to create a temporary file: %s\n", strerror(errno));
+ return false;
+ }
+ close(fd);
+
+ int r = bsdiff::bsdiff(src.DataForPatch(), src.DataLengthForPatch(), tgt.DataForPatch(),
+ tgt.DataLengthForPatch(), ptemp, bsdiff_cache);
+ if (r != 0) {
+ printf("bsdiff() failed: %d\n", r);
+ return false;
+ }
+
+ android::base::unique_fd patch_fd(open(ptemp, O_RDONLY));
+ if (patch_fd == -1) {
+ printf("failed to open %s: %s\n", ptemp, strerror(errno));
+ return false;
+ }
+ struct stat st;
+ if (fstat(patch_fd, &st) != 0) {
+ printf("failed to stat patch file %s: %s\n", ptemp, strerror(errno));
+ return false;
+ }
+
+ size_t sz = static_cast<size_t>(st.st_size);
+
+ patch_data->resize(sz);
+ if (!android::base::ReadFully(patch_fd, patch_data->data(), sz)) {
+ printf("failed to read \"%s\" %s\n", ptemp, strerror(errno));
+ unlink(ptemp);
+ return false;
+ }
+
+ unlink(ptemp);
+
+ return true;
+}
+
bool ImageChunk::ReconstructDeflateChunk() {
if (type_ != CHUNK_DEFLATE) {
printf("attempt to reconstruct non-deflate chunk\n");
return false;
}
- // We only check two combinations of encoder parameters: level 6
- // (the default) and level 9 (the maximum).
+ // We only check two combinations of encoder parameters: level 6 (the default) and level 9
+ // (the maximum).
for (int level = 6; level <= 9; level += 3) {
if (TryReconstruction(level)) {
compress_level_ = level;
@@ -412,10 +362,9 @@
}
/*
- * Takes the uncompressed data stored in the chunk, compresses it
- * using the zlib parameters stored in the chunk, and checks that it
- * matches exactly the compressed data we started with (also stored in
- * the chunk).
+ * Takes the uncompressed data stored in the chunk, compresses it using the zlib parameters stored
+ * in the chunk, and checks that it matches exactly the compressed data we started with (also
+ * stored in the chunk).
*/
bool ImageChunk::TryReconstruction(int level) {
z_stream strm;
@@ -458,29 +407,372 @@
return true;
}
+PatchChunk::PatchChunk(const ImageChunk& tgt, const ImageChunk& src, std::vector<uint8_t> data)
+ : type_(tgt.GetType()),
+ source_start_(src.GetStartOffset()),
+ source_len_(src.GetRawDataLength()),
+ source_uncompressed_len_(src.DataLengthForPatch()),
+ target_start_(tgt.GetStartOffset()),
+ target_len_(tgt.GetRawDataLength()),
+ target_uncompressed_len_(tgt.DataLengthForPatch()),
+ target_compress_level_(tgt.GetCompressLevel()),
+ data_(std::move(data)) {}
+
+// Construct a CHUNK_RAW patch from the target data directly.
+PatchChunk::PatchChunk(const ImageChunk& tgt)
+ : type_(CHUNK_RAW),
+ source_start_(0),
+ source_len_(0),
+ source_uncompressed_len_(0),
+ target_start_(tgt.GetStartOffset()),
+ target_len_(tgt.GetRawDataLength()),
+ target_uncompressed_len_(tgt.DataLengthForPatch()),
+ target_compress_level_(tgt.GetCompressLevel()),
+ data_(tgt.DataForPatch(), tgt.DataForPatch() + tgt.DataLengthForPatch()) {}
+
+// Return true if the target is a normal chunk whose raw data is tiny (<= 160 bytes) or smaller
+// than the patch.
+bool PatchChunk::RawDataIsSmaller(const ImageChunk& tgt, size_t patch_size) {
+ size_t target_len = tgt.GetRawDataLength();
+ return (tgt.GetType() == CHUNK_NORMAL && (target_len <= 160 || target_len < patch_size));
+}
+
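+// For deflate chunks, translate source_start_ from its offset in the original source file to its
+// offset within the split source described by |src_range|.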
+void PatchChunk::UpdateSourceOffset(const SortedRangeSet& src_range) {
+ if (type_ == CHUNK_DEFLATE) {
+ source_start_ = src_range.GetOffsetInRangeSet(source_start_);
+ }
+}
+
+// Header size:
+// header_type 4 bytes
+// CHUNK_NORMAL 8*3 = 24 bytes
+// CHUNK_DEFLATE 8*5 + 4*5 = 60 bytes
+// CHUNK_RAW 4 bytes + patch_size
+size_t PatchChunk::GetHeaderSize() const {
+ switch (type_) {
+ case CHUNK_NORMAL:
+ return 4 + 8 * 3;
+ case CHUNK_DEFLATE:
+ return 4 + 8 * 5 + 4 * 5;
+ case CHUNK_RAW:
+ return 4 + 4 + data_.size();
+ default:
+ CHECK(false) << "unexpected chunk type: " << type_; // Should not reach here.
+ return 0;
+ }
+}
+
+// Return the offset of the next patch into the patch data.
+size_t PatchChunk::WriteHeaderToFd(int fd, size_t offset) const {
+ Write4(fd, type_);
+ switch (type_) {
+ case CHUNK_NORMAL:
+ printf("normal (%10zu, %10zu) %10zu\n", target_start_, target_len_, data_.size());
+ Write8(fd, static_cast<int64_t>(source_start_));
+ Write8(fd, static_cast<int64_t>(source_len_));
+ Write8(fd, static_cast<int64_t>(offset));
+ return offset + data_.size();
+ case CHUNK_DEFLATE:
+ printf("deflate (%10zu, %10zu) %10zu\n", target_start_, target_len_, data_.size());
+ Write8(fd, static_cast<int64_t>(source_start_));
+ Write8(fd, static_cast<int64_t>(source_len_));
+ Write8(fd, static_cast<int64_t>(offset));
+ Write8(fd, static_cast<int64_t>(source_uncompressed_len_));
+ Write8(fd, static_cast<int64_t>(target_uncompressed_len_));
+ Write4(fd, target_compress_level_);
+ Write4(fd, ImageChunk::METHOD);
+ Write4(fd, ImageChunk::WINDOWBITS);
+ Write4(fd, ImageChunk::MEMLEVEL);
+ Write4(fd, ImageChunk::STRATEGY);
+ return offset + data_.size();
+ case CHUNK_RAW:
+ printf("raw (%10zu, %10zu)\n", target_start_, target_len_);
+ Write4(fd, static_cast<int32_t>(data_.size()));
+ if (!android::base::WriteFully(fd, data_.data(), data_.size())) {
+ CHECK(false) << "failed to write " << data_.size() << " bytes patch";
+ }
+ return offset;
+ default:
+ CHECK(false) << "unexpected chunk type: " << type_;
+ return offset;
+ }
+}
+
+// Write the contents of |patch_chunks| to |patch_fd|.
+bool PatchChunk::WritePatchDataToFd(const std::vector<PatchChunk>& patch_chunks, int patch_fd) {
+ // Figure out how big the imgdiff file header is going to be, so that we can correctly compute
+ // the offset of each bsdiff patch within the file.
+ size_t total_header_size = 12;
+ for (const auto& patch : patch_chunks) {
+ total_header_size += patch.GetHeaderSize();
+ }
+
+ size_t offset = total_header_size;
+
+ // Write out the headers.
+ if (!android::base::WriteStringToFd("IMGDIFF2", patch_fd)) {
+ printf("failed to write \"IMGDIFF2\": %s\n", strerror(errno));
+ return false;
+ }
+
+ Write4(patch_fd, static_cast<int32_t>(patch_chunks.size()));
+ for (size_t i = 0; i < patch_chunks.size(); ++i) {
+ printf("chunk %zu: ", i);
+ offset = patch_chunks[i].WriteHeaderToFd(patch_fd, offset);
+ }
+
+ // Append each chunk's bsdiff patch, in order.
+ for (const auto& patch : patch_chunks) {
+ if (patch.type_ == CHUNK_RAW) {
+ continue;
+ }
+ if (!android::base::WriteFully(patch_fd, patch.data_.data(), patch.data_.size())) {
+ printf("failed to write %zu bytes patch to patch_fd\n", patch.data_.size());
+ return false;
+ }
+ }
+
+ return true;
+}
+
+ImageChunk& Image::operator[](size_t i) {
+ CHECK_LT(i, chunks_.size());
+ return chunks_[i];
+}
+
+const ImageChunk& Image::operator[](size_t i) const {
+ CHECK_LT(i, chunks_.size());
+ return chunks_[i];
+}
+
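+// Look for runs of adjacent normal chunks and compress them down into a single chunk. (Such
+// runs can be produced when deflate chunks are changed to normal chunks.)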
+void Image::MergeAdjacentNormalChunks() {
+ size_t merged_last = 0, cur = 0;
+ while (cur < chunks_.size()) {
+ // Look for normal chunks adjacent to the current one. If such chunk exists, extend the
+ // length of the current normal chunk.
+ size_t to_check = cur + 1;
+ while (to_check < chunks_.size() && chunks_[cur].IsAdjacentNormal(chunks_[to_check])) {
+ chunks_[cur].MergeAdjacentNormal(chunks_[to_check]);
+ to_check++;
+ }
+
+ if (merged_last != cur) {
+ chunks_[merged_last] = std::move(chunks_[cur]);
+ }
+ merged_last++;
+ cur = to_check;
+ }
+ if (merged_last < chunks_.size()) {
+ chunks_.erase(chunks_.begin() + merged_last, chunks_.end());
+ }
+}
+
+void Image::DumpChunks() const {
+ std::string type = is_source_ ? "source" : "target";
+ printf("Dumping chunks for %s\n", type.c_str());
+ for (size_t i = 0; i < chunks_.size(); ++i) {
+ printf("chunk %zu: ", i);
+ chunks_[i].Dump();
+ }
+}
+
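+// Read the entire contents of |filename| into |file_content|. Return false on any I/O error.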
+bool Image::ReadFile(const std::string& filename, std::vector<uint8_t>* file_content) {
+ CHECK(file_content != nullptr);
+
+ android::base::unique_fd fd(open(filename.c_str(), O_RDONLY));
+ if (fd == -1) {
+ printf("failed to open \"%s\" %s\n", filename.c_str(), strerror(errno));
+ return false;
+ }
+ struct stat st;
+ if (fstat(fd, &st) != 0) {
+ printf("failed to stat \"%s\": %s\n", filename.c_str(), strerror(errno));
+ return false;
+ }
+
+ size_t sz = static_cast<size_t>(st.st_size);
+ file_content->resize(sz);
+ if (!android::base::ReadFully(fd, file_content->data(), sz)) {
+ printf("failed to read \"%s\" %s\n", filename.c_str(), strerror(errno));
+ return false;
+ }
+ fd.reset();
+
+ return true;
+}
+
+bool ZipModeImage::Initialize(const std::string& filename) {
+ if (!ReadFile(filename, &file_content_)) {
+ return false;
+ }
+
+ // Omit the trailing zeros before we pass the file to ziparchive handler.
+ size_t zipfile_size;
+ if (!GetZipFileSize(&zipfile_size)) {
+ printf("failed to parse the actual size of %s\n", filename.c_str());
+ return false;
+ }
+ ZipArchiveHandle handle;
+ int err = OpenArchiveFromMemory(const_cast<uint8_t*>(file_content_.data()), zipfile_size,
+ filename.c_str(), &handle);
+ if (err != 0) {
+ printf("failed to open zip file %s: %s\n", filename.c_str(), ErrorCodeString(err));
+ CloseArchive(handle);
+ return false;
+ }
+
+ if (!InitializeChunks(filename, handle)) {
+ CloseArchive(handle);
+ return false;
+ }
+
+ CloseArchive(handle);
+ return true;
+}
+
+// Iterate the zip entries and compose the image chunks accordingly.
+bool ZipModeImage::InitializeChunks(const std::string& filename, ZipArchiveHandle handle) {
+ void* cookie;
+ int ret = StartIteration(handle, &cookie, nullptr, nullptr);
+ if (ret != 0) {
+ printf("failed to iterate over entries in %s: %s\n", filename.c_str(), ErrorCodeString(ret));
+ return false;
+ }
+
+ // Create a list of deflated zip entries, sorted by offset.
+ std::vector<std::pair<std::string, ZipEntry>> temp_entries;
+ ZipString name;
+ ZipEntry entry;
+ while ((ret = Next(cookie, &entry, &name)) == 0) {
+ if (entry.method == kCompressDeflated || limit_ > 0) {
+ std::string entry_name(name.name, name.name + name.name_length);
+ temp_entries.emplace_back(entry_name, entry);
+ }
+ }
+
+ if (ret != -1) {
+ printf("Error while iterating over zip entries: %s\n", ErrorCodeString(ret));
+ return false;
+ }
+ std::sort(temp_entries.begin(), temp_entries.end(),
+ [](auto& entry1, auto& entry2) { return entry1.second.offset < entry2.second.offset; });
+
+ EndIteration(cookie);
+
+ // For source chunks, we don't need to compose chunks for the metadata.
+ if (is_source_) {
+ for (auto& entry : temp_entries) {
+ if (!AddZipEntryToChunks(handle, entry.first, &entry.second)) {
+ printf("Failed to add %s to source chunks\n", entry.first.c_str());
+ return false;
+ }
+ }
+
+ // Add the end of zip file (mainly central directory) as a normal chunk.
+ size_t entries_end = 0;
+ if (!temp_entries.empty()) {
+ entries_end = static_cast<size_t>(temp_entries.back().second.offset +
+ temp_entries.back().second.compressed_length);
+ }
+ CHECK_LT(entries_end, file_content_.size());
+ chunks_.emplace_back(CHUNK_NORMAL, entries_end, &file_content_,
+ file_content_.size() - entries_end);
+
+ return true;
+ }
+
+ // For target chunks, add the deflate entries as CHUNK_DEFLATE and the contents between two
+ // deflate entries as CHUNK_NORMAL.
+ size_t pos = 0;
+ size_t nextentry = 0;
+ while (pos < file_content_.size()) {
+ if (nextentry < temp_entries.size() &&
+ static_cast<off64_t>(pos) == temp_entries[nextentry].second.offset) {
+ // Add the next zip entry.
+ std::string entry_name = temp_entries[nextentry].first;
+ if (!AddZipEntryToChunks(handle, entry_name, &temp_entries[nextentry].second)) {
+ printf("Failed to add %s to target chunks\n", entry_name.c_str());
+ return false;
+ }
+
+ pos += temp_entries[nextentry].second.compressed_length;
+ ++nextentry;
+ continue;
+ }
+
+ // Use a normal chunk to take all the data up to the start of the next entry.
+ size_t raw_data_len;
+ if (nextentry < temp_entries.size()) {
+ raw_data_len = temp_entries[nextentry].second.offset - pos;
+ } else {
+ raw_data_len = file_content_.size() - pos;
+ }
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, raw_data_len);
+
+ pos += raw_data_len;
+ }
+
+ return true;
+}
+
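+// Add the given zip entry to chunks_: deflated entries become CHUNK_DEFLATE chunks (with their
+// uncompressed data attached), others become CHUNK_NORMAL. If a block limit is set, oversized
+// entries are split into several normal chunks.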
+bool ZipModeImage::AddZipEntryToChunks(ZipArchiveHandle handle, const std::string& entry_name,
+ ZipEntry* entry) {
+ size_t compressed_len = entry->compressed_length;
+ if (compressed_len == 0) return true;
+
+ // Split the entry into several normal chunks if it's too large.
+ if (limit_ > 0 && compressed_len > limit_) {
+ int count = 0;
+ while (compressed_len > 0) {
+ size_t length = std::min(limit_, compressed_len);
+ std::string name = entry_name + "-" + std::to_string(count);
+ chunks_.emplace_back(CHUNK_NORMAL, entry->offset + limit_ * count, &file_content_, length,
+ name);
+
+ count++;
+ compressed_len -= length;
+ }
+ } else if (entry->method == kCompressDeflated) {
+ size_t uncompressed_len = entry->uncompressed_length;
+ std::vector<uint8_t> uncompressed_data(uncompressed_len);
+ int ret = ExtractToMemory(handle, entry, uncompressed_data.data(), uncompressed_len);
+ if (ret != 0) {
+ printf("failed to extract %s with size %zu: %s\n", entry_name.c_str(), uncompressed_len,
+ ErrorCodeString(ret));
+ return false;
+ }
+ ImageChunk curr(CHUNK_DEFLATE, entry->offset, &file_content_, compressed_len, entry_name);
+ curr.SetUncompressedData(std::move(uncompressed_data));
+ chunks_.push_back(std::move(curr));
+ } else {
+ chunks_.emplace_back(CHUNK_NORMAL, entry->offset, &file_content_, compressed_len, entry_name);
+ }
+
+ return true;
+}
+
// EOCD record
// offset 0: signature 0x06054b50, 4 bytes
// offset 4: number of this disk, 2 bytes
// ...
// offset 20: comment length, 2 bytes
// offset 22: comment, n bytes
-static bool GetZipFileSize(const std::vector<uint8_t>& zip_file, size_t* input_file_size) {
- if (zip_file.size() < 22) {
+bool ZipModeImage::GetZipFileSize(size_t* input_file_size) {
+ if (file_content_.size() < 22) {
printf("file is too small to be a zip file\n");
return false;
}
// Look for End of central directory record of the zip file, and calculate the actual
// zip_file size.
- for (int i = zip_file.size() - 22; i >= 0; i--) {
- if (zip_file[i] == 0x50) {
- if (get_unaligned<uint32_t>(&zip_file[i]) == 0x06054b50) {
+ for (int i = file_content_.size() - 22; i >= 0; i--) {
+ if (file_content_[i] == 0x50) {
+ if (get_unaligned<uint32_t>(&file_content_[i]) == 0x06054b50) {
// double-check: this archive consists of a single "disk".
- CHECK_EQ(get_unaligned<uint16_t>(&zip_file[i + 4]), 0);
+ CHECK_EQ(get_unaligned<uint16_t>(&file_content_[i + 4]), 0);
- uint16_t comment_length = get_unaligned<uint16_t>(&zip_file[i + 20]);
+ uint16_t comment_length = get_unaligned<uint16_t>(&file_content_[i + 20]);
size_t file_size = i + 22 + comment_length;
- CHECK_LE(file_size, zip_file.size());
+ CHECK_LE(file_size, file_content_.size());
*input_file_size = file_size;
return true;
}
@@ -491,162 +783,409 @@
return false;
}
-static bool ReadZip(const char* filename, std::vector<ImageChunk>* chunks,
- std::vector<uint8_t>* zip_file, bool include_pseudo_chunk) {
- CHECK(chunks != nullptr && zip_file != nullptr);
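+// Return a normal chunk that covers the entire source file; it serves as the bsdiff source when
+// no matching source chunk can be found for a target chunk.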
+ImageChunk ZipModeImage::PseudoSource() const {
+ CHECK(is_source_);
+ return ImageChunk(CHUNK_NORMAL, 0, &file_content_, file_content_.size());
+}
- android::base::unique_fd fd(open(filename, O_RDONLY));
- if (fd == -1) {
- printf("failed to open \"%s\" %s\n", filename, strerror(errno));
- return false;
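+// Find the chunk whose zip entry name matches |name|, also tolerating a "-0" suffix introduced
+// by splitting. Only deflate chunks are considered unless |find_normal| is true. Return nullptr
+// if there is no match.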
+const ImageChunk* ZipModeImage::FindChunkByName(const std::string& name, bool find_normal) const {
+ if (name.empty()) {
+ return nullptr;
}
- struct stat st;
- if (fstat(fd, &st) != 0) {
- printf("failed to stat \"%s\": %s\n", filename, strerror(errno));
- return false;
- }
-
- size_t sz = static_cast<size_t>(st.st_size);
- zip_file->resize(sz);
- if (!android::base::ReadFully(fd, zip_file->data(), sz)) {
- printf("failed to read \"%s\" %s\n", filename, strerror(errno));
- return false;
- }
- fd.reset();
-
- // Trim the trailing zeros before we pass the file to ziparchive handler.
- size_t zipfile_size;
- if (!GetZipFileSize(*zip_file, &zipfile_size)) {
- printf("failed to parse the actual size of %s\n", filename);
- return false;
- }
- ZipArchiveHandle handle;
- int err = OpenArchiveFromMemory(zip_file->data(), zipfile_size, filename, &handle);
- if (err != 0) {
- printf("failed to open zip file %s: %s\n", filename, ErrorCodeString(err));
- CloseArchive(handle);
- return false;
- }
-
- // Create a list of deflated zip entries, sorted by offset.
- std::vector<std::pair<std::string, ZipEntry>> temp_entries;
- void* cookie;
- int ret = StartIteration(handle, &cookie, nullptr, nullptr);
- if (ret != 0) {
- printf("failed to iterate over entries in %s: %s\n", filename, ErrorCodeString(ret));
- CloseArchive(handle);
- return false;
- }
-
- ZipString name;
- ZipEntry entry;
- while ((ret = Next(cookie, &entry, &name)) == 0) {
- if (entry.method == kCompressDeflated) {
- std::string entryname(name.name, name.name + name.name_length);
- temp_entries.push_back(std::make_pair(entryname, entry));
- }
- }
-
- if (ret != -1) {
- printf("Error while iterating over zip entries: %s\n", ErrorCodeString(ret));
- CloseArchive(handle);
- return false;
- }
- std::sort(temp_entries.begin(), temp_entries.end(),
- [](auto& entry1, auto& entry2) {
- return entry1.second.offset < entry2.second.offset;
- });
-
- EndIteration(cookie);
-
- if (include_pseudo_chunk) {
- chunks->emplace_back(CHUNK_NORMAL, 0, zip_file, zip_file->size());
- }
-
- size_t pos = 0;
- size_t nextentry = 0;
- while (pos < zip_file->size()) {
- if (nextentry < temp_entries.size() &&
- static_cast<off64_t>(pos) == temp_entries[nextentry].second.offset) {
- // compose the next deflate chunk.
- std::string entryname = temp_entries[nextentry].first;
- size_t uncompressed_len = temp_entries[nextentry].second.uncompressed_length;
- std::vector<uint8_t> uncompressed_data(uncompressed_len);
- if ((ret = ExtractToMemory(handle, &temp_entries[nextentry].second, uncompressed_data.data(),
- uncompressed_len)) != 0) {
- printf("failed to extract %s with size %zu: %s\n", entryname.c_str(), uncompressed_len,
- ErrorCodeString(ret));
- CloseArchive(handle);
- return false;
- }
-
- size_t compressed_len = temp_entries[nextentry].second.compressed_length;
- ImageChunk curr(CHUNK_DEFLATE, pos, zip_file, compressed_len);
- curr.SetEntryName(std::move(entryname));
- curr.SetUncompressedData(std::move(uncompressed_data));
- chunks->push_back(curr);
-
- pos += compressed_len;
- ++nextentry;
+ for (auto& chunk : chunks_) {
+ if (chunk.GetType() != CHUNK_DEFLATE && !find_normal) {
continue;
}
- // Use a normal chunk to take all the data up to the start of the next deflate section.
- size_t raw_data_len;
- if (nextentry < temp_entries.size()) {
- raw_data_len = temp_entries[nextentry].second.offset - pos;
- } else {
- raw_data_len = zip_file->size() - pos;
+ if (chunk.GetEntryName() == name) {
+ return &chunk;
}
- chunks->emplace_back(CHUNK_NORMAL, pos, zip_file, raw_data_len);
- pos += raw_data_len;
+ // Edge case when target chunk is split due to size limit but source chunk isn't.
+ if (name == (chunk.GetEntryName() + "-0") || chunk.GetEntryName() == (name + "-0")) {
+ return &chunk;
+ }
+
+ // TODO: handle .so files whose names contain a version number.
+ // (e.g. lib/arm64-v8a/libcronet.59.0.3050.4.so)
}
- CloseArchive(handle);
+ return nullptr;
+}
+
+ImageChunk* ZipModeImage::FindChunkByName(const std::string& name, bool find_normal) {
+ return const_cast<ImageChunk*>(
+ static_cast<const ZipModeImage*>(this)->FindChunkByName(name, find_normal));
+}
+
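+// For each target deflate chunk, find the matching source chunk by entry name. Change the chunk
+// to normal if there is no match, if the two chunks are identical, or if the target deflate
+// stream cannot be reconstructed bit-exactly.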
+bool ZipModeImage::CheckAndProcessChunks(ZipModeImage* tgt_image, ZipModeImage* src_image) {
+ for (auto& tgt_chunk : *tgt_image) {
+ if (tgt_chunk.GetType() != CHUNK_DEFLATE) {
+ continue;
+ }
+
+ ImageChunk* src_chunk = src_image->FindChunkByName(tgt_chunk.GetEntryName());
+ if (src_chunk == nullptr) {
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ } else if (tgt_chunk == *src_chunk) {
+ // If two deflate chunks are identical (eg, the kernel has not changed between two builds),
+ // treat them as normal chunks. This makes applypatch much faster -- it can apply a trivial
+ // patch to the compressed data, rather than uncompressing and recompressing to apply the
+ // trivial patch to the uncompressed data.
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ src_chunk->ChangeDeflateChunkToNormal();
+ } else if (!tgt_chunk.ReconstructDeflateChunk()) {
+ // We cannot recompress the data and get exactly the same bits as are in the input target
+ // image. Treat the chunk as a normal non-deflated chunk.
+ printf("failed to reconstruct target deflate chunk [%s]; treating as normal\n",
+ tgt_chunk.GetEntryName().c_str());
+
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ src_chunk->ChangeDeflateChunkToNormal();
+ }
+ }
+
+ // For zips, we only need to merge normal chunks for the target: deflated chunks are matched via
+ // filename, and normal chunks are patched using the entire source file as the source.
+ if (tgt_image->limit_ == 0) {
+ tgt_image->MergeAdjacentNormalChunks();
+ tgt_image->DumpChunks();
+ }
+
return true;
}
-// Read the given file and break it up into chunks, and putting the data in to a vector.
-static bool ReadImage(const char* filename, std::vector<ImageChunk>* chunks,
- std::vector<uint8_t>* img) {
- CHECK(chunks != nullptr && img != nullptr);
+// For each target chunk, look for the corresponding source chunk by its zip entry name. If
+// found, add the range of this chunk in the original source file to the block-aligned source
+// ranges. Construct a split src & tgt image once the total size of the source ranges reaches
+// the limit.
+bool ZipModeImage::SplitZipModeImageWithLimit(const ZipModeImage& tgt_image,
+ const ZipModeImage& src_image,
+ std::vector<ZipModeImage>* split_tgt_images,
+ std::vector<ZipModeImage>* split_src_images,
+ std::vector<SortedRangeSet>* split_src_ranges) {
+ CHECK_EQ(tgt_image.limit_, src_image.limit_);
+ size_t limit = tgt_image.limit_;
- android::base::unique_fd fd(open(filename, O_RDONLY));
- if (fd == -1) {
- printf("failed to open \"%s\" %s\n", filename, strerror(errno));
- return false;
+ src_image.DumpChunks();
+ printf("Splitting %zu tgt chunks...\n", tgt_image.NumOfChunks());
+
+ SortedRangeSet used_src_ranges; // ranges used for previous split source images.
+
+ // Reserve the central directory in advance for the last split image.
+ const auto& central_directory = src_image.cend() - 1;
+ CHECK_EQ(CHUNK_NORMAL, central_directory->GetType());
+ used_src_ranges.Insert(central_directory->GetStartOffset(),
+ central_directory->DataLengthForPatch());
+
+ SortedRangeSet src_ranges;
+ std::vector<ImageChunk> split_src_chunks;
+ std::vector<ImageChunk> split_tgt_chunks;
+ for (auto tgt = tgt_image.cbegin(); tgt != tgt_image.cend(); tgt++) {
+ const ImageChunk* src = src_image.FindChunkByName(tgt->GetEntryName(), true);
+ if (src == nullptr) {
+ split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(), &tgt_image.file_content_,
+ tgt->GetRawDataLength());
+ continue;
+ }
+
+ size_t src_offset = src->GetStartOffset();
+ size_t src_length = src->GetRawDataLength();
+
+ CHECK(src_length > 0);
+ CHECK_LE(src_length, limit);
+
+ // Make sure this source range hasn't been used before so that the src_range pieces don't
+ // overlap with each other.
+ if (!RemoveUsedBlocks(&src_offset, &src_length, used_src_ranges)) {
+ split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(), &tgt_image.file_content_,
+ tgt->GetRawDataLength());
+ } else if (src_ranges.blocks() * BLOCK_SIZE + src_length <= limit) {
+ src_ranges.Insert(src_offset, src_length);
+
+ // Add the deflate source chunk if it hasn't been aligned.
+ if (src->GetType() == CHUNK_DEFLATE && src_length == src->GetRawDataLength()) {
+ split_src_chunks.push_back(*src);
+ split_tgt_chunks.push_back(*tgt);
+ } else {
+ // TODO split smarter to avoid alignment of large deflate chunks
+ split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(), &tgt_image.file_content_,
+ tgt->GetRawDataLength());
+ }
+ } else {
+ ZipModeImage::AddSplitImageFromChunkList(tgt_image, src_image, src_ranges, split_tgt_chunks,
+ split_src_chunks, split_tgt_images,
+ split_src_images);
+
+ split_tgt_chunks.clear();
+ split_src_chunks.clear();
+ used_src_ranges.Insert(src_ranges);
+ split_src_ranges->push_back(std::move(src_ranges));
+ src_ranges.Clear();
+
+ // We don't have enough space for the current chunk; start a new split image and handle
+ // this chunk there.
+ tgt--;
+ }
}
- struct stat st;
- if (fstat(fd, &st) != 0) {
- printf("failed to stat \"%s\": %s\n", filename, strerror(errno));
+
+ // TODO: Trim the central directory in case it exceeds the limit by too much.
+ src_ranges.Insert(central_directory->GetStartOffset(), central_directory->DataLengthForPatch());
+ ZipModeImage::AddSplitImageFromChunkList(tgt_image, src_image, src_ranges, split_tgt_chunks,
+ split_src_chunks, split_tgt_images, split_src_images);
+ split_src_ranges->push_back(std::move(src_ranges));
+
+ ValidateSplitImages(*split_tgt_images, *split_src_images, *split_src_ranges,
+ tgt_image.file_content_.size());
+
+ return true;
+}
+
+void ZipModeImage::AddSplitImageFromChunkList(const ZipModeImage& tgt_image,
+ const ZipModeImage& src_image,
+ const SortedRangeSet& split_src_ranges,
+ const std::vector<ImageChunk>& split_tgt_chunks,
+ const std::vector<ImageChunk>& split_src_chunks,
+ std::vector<ZipModeImage>* split_tgt_images,
+ std::vector<ZipModeImage>* split_src_images) {
+ CHECK(!split_tgt_chunks.empty());
+ // Target chunks should occupy at least one block.
+ // TODO put a warning and change the type to raw if it happens in extremely rare cases.
+ size_t tgt_size = split_tgt_chunks.back().GetStartOffset() +
+ split_tgt_chunks.back().DataLengthForPatch() -
+ split_tgt_chunks.front().GetStartOffset();
+ CHECK_GE(tgt_size, BLOCK_SIZE);
+
+ std::vector<ImageChunk> aligned_tgt_chunks;
+
+ // Align the target chunks in the beginning with BLOCK_SIZE.
+ size_t i = 0;
+ while (i < split_tgt_chunks.size()) {
+ size_t tgt_start = split_tgt_chunks[i].GetStartOffset();
+ size_t tgt_length = split_tgt_chunks[i].GetRawDataLength();
+
+ // Current ImageChunk is long enough to align.
+ if (AlignHead(&tgt_start, &tgt_length)) {
+ aligned_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt_start, &tgt_image.file_content_,
+ tgt_length);
+ break;
+ }
+
+ i++;
+ }
+ CHECK_LT(i, split_tgt_chunks.size());
+ aligned_tgt_chunks.insert(aligned_tgt_chunks.end(), split_tgt_chunks.begin() + i + 1,
+ split_tgt_chunks.end());
+ CHECK(!aligned_tgt_chunks.empty());
+
+ // Add a normal chunk to align the contents in the end.
+ size_t end_offset =
+ aligned_tgt_chunks.back().GetStartOffset() + aligned_tgt_chunks.back().GetRawDataLength();
+ if (end_offset % BLOCK_SIZE != 0 && end_offset < tgt_image.file_content_.size()) {
+ aligned_tgt_chunks.emplace_back(CHUNK_NORMAL, end_offset, &tgt_image.file_content_,
+ BLOCK_SIZE - (end_offset % BLOCK_SIZE));
+ }
+
+ ZipModeImage split_tgt_image(false);
+ split_tgt_image.Initialize(std::move(aligned_tgt_chunks), {});
+ split_tgt_image.MergeAdjacentNormalChunks();
+
+ // Construct the dummy source file based on the src_ranges.
+ std::vector<uint8_t> src_content;
+ for (const auto& r : split_src_ranges) {
+ size_t end = std::min(src_image.file_content_.size(), r.second * BLOCK_SIZE);
+ src_content.insert(src_content.end(), src_image.file_content_.begin() + r.first * BLOCK_SIZE,
+ src_image.file_content_.begin() + end);
+ }
+
+ // We should not have an empty src in our design; otherwise we will encounter an error in
+ // bsdiff since src_content.data() == nullptr.
+ CHECK(!src_content.empty());
+
+ ZipModeImage split_src_image(true);
+ split_src_image.Initialize(split_src_chunks, std::move(src_content));
+
+ split_tgt_images->push_back(std::move(split_tgt_image));
+ split_src_images->push_back(std::move(split_src_image));
+}
+
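+// Sanity-check the split results: the target pieces must be contiguous and cover the whole
+// target image, and the source ranges must be mutually exclusive.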
+void ZipModeImage::ValidateSplitImages(const std::vector<ZipModeImage>& split_tgt_images,
+ const std::vector<ZipModeImage>& split_src_images,
+ std::vector<SortedRangeSet>& split_src_ranges,
+ size_t total_tgt_size) {
+ CHECK_EQ(split_tgt_images.size(), split_src_images.size());
+
+ printf("Validating %zu images\n", split_tgt_images.size());
+
+ // Verify that the target image pieces are contiguous and add up to the total size.
+ size_t last_offset = 0;
+ for (const auto& tgt_image : split_tgt_images) {
+ CHECK(!tgt_image.chunks_.empty());
+
+ CHECK_EQ(last_offset, tgt_image.chunks_.front().GetStartOffset());
+ CHECK(last_offset % BLOCK_SIZE == 0);
+
+ // Check that the target chunks within the split image are contiguous.
+ for (const auto& chunk : tgt_image.chunks_) {
+ CHECK_EQ(last_offset, chunk.GetStartOffset());
+ last_offset += chunk.GetRawDataLength();
+ }
+ }
+ CHECK_EQ(total_tgt_size, last_offset);
+
+ // Verify that the source ranges are mutually exclusive.
+ CHECK_EQ(split_src_images.size(), split_src_ranges.size());
+ SortedRangeSet used_src_ranges;
+ for (size_t i = 0; i < split_src_ranges.size(); i++) {
+ CHECK(!used_src_ranges.Overlaps(split_src_ranges[i]))
+ << "src range " << split_src_ranges[i].ToString() << " overlaps "
+ << used_src_ranges.ToString();
+ used_src_ranges.Insert(split_src_ranges[i]);
+ }
+}
+
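+// Generate one PatchChunk per target chunk: emit the raw target data when it is smaller than a
+// patch, otherwise bsdiff against the matching source chunk (or the whole-file pseudo source
+// when there is none).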
+bool ZipModeImage::GeneratePatchesInternal(const ZipModeImage& tgt_image,
+ const ZipModeImage& src_image,
+ std::vector<PatchChunk>* patch_chunks) {
+ printf("Construct patches for %zu chunks...\n", tgt_image.NumOfChunks());
+ patch_chunks->clear();
+
+ saidx_t* bsdiff_cache = nullptr;
+ for (size_t i = 0; i < tgt_image.NumOfChunks(); i++) {
+ const auto& tgt_chunk = tgt_image[i];
+
+ if (PatchChunk::RawDataIsSmaller(tgt_chunk, 0)) {
+ patch_chunks->emplace_back(tgt_chunk);
+ continue;
+ }
+
+ const ImageChunk* src_chunk = (tgt_chunk.GetType() != CHUNK_DEFLATE)
+ ? nullptr
+ : src_image.FindChunkByName(tgt_chunk.GetEntryName());
+
+ const auto& src_ref = (src_chunk == nullptr) ? src_image.PseudoSource() : *src_chunk;
+ saidx_t** bsdiff_cache_ptr = (src_chunk == nullptr) ? &bsdiff_cache : nullptr;
+
+ std::vector<uint8_t> patch_data;
+ if (!ImageChunk::MakePatch(tgt_chunk, src_ref, &patch_data, bsdiff_cache_ptr)) {
+ printf("Failed to generate patch, name: %s\n", tgt_chunk.GetEntryName().c_str());
+ return false;
+ }
+
+ printf("patch %3zu is %zu bytes (of %zu)\n", i, patch_data.size(),
+ tgt_chunk.GetRawDataLength());
+
+ if (PatchChunk::RawDataIsSmaller(tgt_chunk, patch_data.size())) {
+ patch_chunks->emplace_back(tgt_chunk);
+ } else {
+ patch_chunks->emplace_back(tgt_chunk, src_ref, std::move(patch_data));
+ }
+ }
+ free(bsdiff_cache);
+
+ CHECK_EQ(patch_chunks->size(), tgt_image.NumOfChunks());
+ return true;
+}
+
+bool ZipModeImage::GeneratePatches(const ZipModeImage& tgt_image, const ZipModeImage& src_image,
+ const std::string& patch_name) {
+ std::vector<PatchChunk> patch_chunks;
+
+ ZipModeImage::GeneratePatchesInternal(tgt_image, src_image, &patch_chunks);
+
+ CHECK_EQ(tgt_image.NumOfChunks(), patch_chunks.size());
+
+ android::base::unique_fd patch_fd(
+ open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
+ if (patch_fd == -1) {
+ printf("failed to open \"%s\": %s\n", patch_name.c_str(), strerror(errno));
return false;
}
- size_t sz = static_cast<size_t>(st.st_size);
- img->resize(sz);
- if (!android::base::ReadFully(fd, img->data(), sz)) {
- printf("failed to read \"%s\" %s\n", filename, strerror(errno));
+ return PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd);
+}
+
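+// Generate patches for each split image pair and concatenate them into |patch_name|. If
+// |debug_dir| is non-empty, also dump each split source and its patch there.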
+bool ZipModeImage::GeneratePatches(const std::vector<ZipModeImage>& split_tgt_images,
+ const std::vector<ZipModeImage>& split_src_images,
+ const std::vector<SortedRangeSet>& split_src_ranges,
+ const std::string& patch_name, const std::string& debug_dir) {
+ printf("Construct patches for %zu split images...\n", split_tgt_images.size());
+
+ android::base::unique_fd patch_fd(
+ open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
+ if (patch_fd == -1) {
+ printf("failed to open \"%s\": %s\n", patch_name.c_str(), strerror(errno));
return false;
}
+ for (size_t i = 0; i < split_tgt_images.size(); i++) {
+ std::vector<PatchChunk> patch_chunks;
+ if (!ZipModeImage::GeneratePatchesInternal(split_tgt_images[i], split_src_images[i],
+ &patch_chunks)) {
+ printf("failed to generate split patch\n");
+ return false;
+ }
+
+ for (auto& p : patch_chunks) {
+ p.UpdateSourceOffset(split_src_ranges[i]);
+ }
+
+ if (!PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd)) {
+ return false;
+ }
+
+ // Write the split source & patch into the debug directory.
+ if (!debug_dir.empty()) {
+ std::string src_name = android::base::StringPrintf("%s/src-%zu", debug_dir.c_str(), i);
+ android::base::unique_fd fd(
+ open(src_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
+
+ if (fd == -1) {
+ printf("Failed to open %s\n", src_name.c_str());
+ return false;
+ }
+ if (!android::base::WriteFully(fd, split_src_images[i].PseudoSource().DataForPatch(),
+ split_src_images[i].PseudoSource().DataLengthForPatch())) {
+ printf("Failed to write split source data into %s\n", src_name.c_str());
+ return false;
+ }
+
+ std::string patch_name = android::base::StringPrintf("%s/patch-%zu", debug_dir.c_str(), i);
+ fd.reset(open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
+
+ if (fd == -1) {
+ printf("Failed to open %s\n", patch_name.c_str());
+ return false;
+ }
+ if (!PatchChunk::WritePatchDataToFd(patch_chunks, fd)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
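+// Read the given file and break it up into chunks: each gzip stream becomes a normal header
+// chunk, a deflate body chunk and a normal footer chunk; everything else becomes normal chunks.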
+bool ImageModeImage::Initialize(const std::string& filename) {
+ if (!ReadFile(filename, &file_content_)) {
+ return false;
+ }
+
+ size_t sz = file_content_.size();
size_t pos = 0;
-
while (pos < sz) {
// 0x00 no header flags, 0x08 deflate compression, 0x1f8b gzip magic number
- if (sz - pos >= 4 && get_unaligned<uint32_t>(img->data() + pos) == 0x00088b1f) {
+ if (sz - pos >= 4 && get_unaligned<uint32_t>(file_content_.data() + pos) == 0x00088b1f) {
// 'pos' is the offset of the start of a gzip chunk.
size_t chunk_offset = pos;
// The remaining data is too small to be a gzip chunk; treat them as a normal chunk.
if (sz - pos < GZIP_HEADER_LEN + GZIP_FOOTER_LEN) {
- chunks->emplace_back(CHUNK_NORMAL, pos, img, sz - pos);
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, sz - pos);
break;
}
// We need three chunks for the deflated image in total, one normal chunk for the header,
// one deflated chunk for the body, and another normal chunk for the footer.
- chunks->emplace_back(CHUNK_NORMAL, pos, img, GZIP_HEADER_LEN);
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, GZIP_HEADER_LEN);
pos += GZIP_HEADER_LEN;
// We must decompress this chunk in order to discover where it ends, and so we can update
@@ -657,7 +1196,7 @@
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.avail_in = sz - pos;
- strm.next_in = img->data() + pos;
+ strm.next_in = file_content_.data() + pos;
// -15 means we are decoding a 'raw' deflate stream; zlib will
// not expect zlib headers.
@@ -700,22 +1239,22 @@
printf("Warning: invalid footer position; treating as a nomal chunk\n");
continue;
}
- size_t footer_size = get_unaligned<uint32_t>(img->data() + footer_index);
+ size_t footer_size = get_unaligned<uint32_t>(file_content_.data() + footer_index);
if (footer_size != uncompressed_len) {
printf("Warning: footer size %zu != decompressed size %zu; treating as a nomal chunk\n",
footer_size, uncompressed_len);
continue;
}
- ImageChunk body(CHUNK_DEFLATE, pos, img, raw_data_len);
+ ImageChunk body(CHUNK_DEFLATE, pos, &file_content_, raw_data_len);
uncompressed_data.resize(uncompressed_len);
body.SetUncompressedData(std::move(uncompressed_data));
- chunks->push_back(body);
+ chunks_.push_back(std::move(body));
pos += raw_data_len;
// create a normal chunk for the footer
- chunks->emplace_back(CHUNK_NORMAL, pos, img, GZIP_FOOTER_LEN);
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, GZIP_FOOTER_LEN);
pos += GZIP_FOOTER_LEN;
} else {
@@ -726,12 +1265,12 @@
size_t data_len = 0;
while (data_len + pos < sz) {
if (data_len + pos + 4 <= sz &&
- get_unaligned<uint32_t>(img->data() + pos + data_len) == 0x00088b1f) {
+ get_unaligned<uint32_t>(file_content_.data() + pos + data_len) == 0x00088b1f) {
break;
}
data_len++;
}
- chunks->emplace_back(CHUNK_NORMAL, pos, img, data_len);
+ chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, data_len);
pos += data_len;
}
@@ -740,346 +1279,238 @@
return true;
}
-/*
- * Given source and target chunks, compute a bsdiff patch between them.
- * Store the result in the patch_data.
- * |bsdiff_cache| can be used to cache the suffix array if the same |src| chunk
- * is used repeatedly, pass nullptr if not needed.
- */
-static bool MakePatch(const ImageChunk* src, ImageChunk* tgt, std::vector<uint8_t>* patch_data,
- saidx_t** bsdiff_cache) {
- if (tgt->ChangeChunkToRaw(0)) {
- size_t patch_size = tgt->DataLengthForPatch();
- patch_data->resize(patch_size);
- std::copy(tgt->DataForPatch(), tgt->DataForPatch() + patch_size, patch_data->begin());
- return true;
- }
-
-#if defined(__ANDROID__)
- char ptemp[] = "/data/local/tmp/imgdiff-patch-XXXXXX";
-#else
- char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
-#endif
-
- int fd = mkstemp(ptemp);
- if (fd == -1) {
- printf("MakePatch failed to create a temporary file: %s\n", strerror(errno));
- return false;
- }
- close(fd);
-
- int r = bsdiff::bsdiff(src->DataForPatch(), src->DataLengthForPatch(), tgt->DataForPatch(),
- tgt->DataLengthForPatch(), ptemp, bsdiff_cache);
- if (r != 0) {
- printf("bsdiff() failed: %d\n", r);
+bool ImageModeImage::SetBonusData(const std::vector<uint8_t>& bonus_data) {
+ CHECK(is_source_);
+ if (chunks_.size() < 2 || !chunks_[1].SetBonusData(bonus_data)) {
+ printf("Failed to set bonus data\n");
+ DumpChunks();
return false;
}
- android::base::unique_fd patch_fd(open(ptemp, O_RDONLY));
- if (patch_fd == -1) {
- printf("failed to open %s: %s\n", ptemp, strerror(errno));
+ printf(" using %zu bytes of bonus data\n", bonus_data.size());
+ return true;
+}
+
+// In Image Mode, verify that the source and target images have the same chunk structure (ie, the
+// same sequence of deflate and normal chunks).
+bool ImageModeImage::CheckAndProcessChunks(ImageModeImage* tgt_image, ImageModeImage* src_image) {
+ // In image mode, merge the gzip header and footer in with any adjacent normal chunks.
+ tgt_image->MergeAdjacentNormalChunks();
+ src_image->MergeAdjacentNormalChunks();
+
+ if (tgt_image->NumOfChunks() != src_image->NumOfChunks()) {
+ printf("source and target don't have same number of chunks!\n");
+ tgt_image->DumpChunks();
+ src_image->DumpChunks();
return false;
}
- struct stat st;
- if (fstat(patch_fd, &st) != 0) {
- printf("failed to stat patch file %s: %s\n", ptemp, strerror(errno));
- return false;
+ for (size_t i = 0; i < tgt_image->NumOfChunks(); ++i) {
+ if ((*tgt_image)[i].GetType() != (*src_image)[i].GetType()) {
+ printf("source and target don't have same chunk structure! (chunk %zu)\n", i);
+ tgt_image->DumpChunks();
+ src_image->DumpChunks();
+ return false;
+ }
}
- size_t sz = static_cast<size_t>(st.st_size);
- // Change the chunk type to raw if the patch takes less space that way.
- if (tgt->ChangeChunkToRaw(sz)) {
- unlink(ptemp);
- size_t patch_size = tgt->DataLengthForPatch();
- patch_data->resize(patch_size);
- std::copy(tgt->DataForPatch(), tgt->DataForPatch() + patch_size, patch_data->begin());
- return true;
- }
- patch_data->resize(sz);
- if (!android::base::ReadFully(patch_fd, patch_data->data(), sz)) {
- printf("failed to read \"%s\" %s\n", ptemp, strerror(errno));
- return false;
+ for (size_t i = 0; i < tgt_image->NumOfChunks(); ++i) {
+ auto& tgt_chunk = (*tgt_image)[i];
+ auto& src_chunk = (*src_image)[i];
+ if (tgt_chunk.GetType() != CHUNK_DEFLATE) {
+ continue;
+ }
+
+ // If two deflate chunks are identical treat them as normal chunks.
+ if (tgt_chunk == src_chunk) {
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ src_chunk.ChangeDeflateChunkToNormal();
+ } else if (!tgt_chunk.ReconstructDeflateChunk()) {
+ // We cannot recompress the data and get exactly the same bits as are in the input target
+ // image, fall back to normal
+ printf("failed to reconstruct target deflate chunk %zu [%s]; treating as normal\n", i,
+ tgt_chunk.GetEntryName().c_str());
+ tgt_chunk.ChangeDeflateChunkToNormal();
+ src_chunk.ChangeDeflateChunkToNormal();
+ }
}
- unlink(ptemp);
- tgt->SetSourceInfo(*src);
+ // For images, we need to maintain the parallel structure of the chunk lists, so do the merging
+ // in both the source and target lists.
+ tgt_image->MergeAdjacentNormalChunks();
+ src_image->MergeAdjacentNormalChunks();
+ if (tgt_image->NumOfChunks() != src_image->NumOfChunks()) {
+ // This shouldn't happen.
+ printf("merging normal chunks went awry\n");
+ return false;
+ }
return true;
}
-/*
- * Look for runs of adjacent normal chunks and compress them down into
- * a single chunk. (Such runs can be produced when deflate chunks are
- * changed to normal chunks.)
- */
-static void MergeAdjacentNormalChunks(std::vector<ImageChunk>* chunks) {
- size_t merged_last = 0, cur = 0;
- while (cur < chunks->size()) {
- // Look for normal chunks adjacent to the current one. If such chunk exists, extend the
- // length of the current normal chunk.
- size_t to_check = cur + 1;
- while (to_check < chunks->size() && chunks->at(cur).IsAdjacentNormal(chunks->at(to_check))) {
- chunks->at(cur).MergeAdjacentNormal(chunks->at(to_check));
- to_check++;
+// In image mode, generate patches against the given source chunks and bonus_data; write the
+// result to |patch_name|.
+bool ImageModeImage::GeneratePatches(const ImageModeImage& tgt_image,
+ const ImageModeImage& src_image,
+ const std::string& patch_name) {
+ printf("Construct patches for %zu chunks...\n", tgt_image.NumOfChunks());
+ std::vector<PatchChunk> patch_chunks;
+ patch_chunks.reserve(tgt_image.NumOfChunks());
+
+ for (size_t i = 0; i < tgt_image.NumOfChunks(); i++) {
+ const auto& tgt_chunk = tgt_image[i];
+ const auto& src_chunk = src_image[i];
+
+ if (PatchChunk::RawDataIsSmaller(tgt_chunk, 0)) {
+ patch_chunks.emplace_back(tgt_chunk);
+ continue;
}
- if (merged_last != cur) {
- chunks->at(merged_last) = std::move(chunks->at(cur));
+ std::vector<uint8_t> patch_data;
+ if (!ImageChunk::MakePatch(tgt_chunk, src_chunk, &patch_data, nullptr)) {
+ printf("Failed to generate patch for target chunk %zu: ", i);
+ return false;
}
- merged_last++;
- cur = to_check;
- }
- if (merged_last < chunks->size()) {
- chunks->erase(chunks->begin() + merged_last, chunks->end());
- }
-}
+ printf("patch %3zu is %zu bytes (of %zu)\n", i, patch_data.size(),
+ tgt_chunk.GetRawDataLength());
-static ImageChunk* FindChunkByName(const std::string& name, std::vector<ImageChunk>& chunks) {
- for (size_t i = 0; i < chunks.size(); ++i) {
- if (chunks[i].GetType() == CHUNK_DEFLATE && chunks[i].GetEntryName() == name) {
- return &chunks[i];
+ if (PatchChunk::RawDataIsSmaller(tgt_chunk, patch_data.size())) {
+ patch_chunks.emplace_back(tgt_chunk);
+ } else {
+ patch_chunks.emplace_back(tgt_chunk, src_chunk, std::move(patch_data));
}
}
- return nullptr;
-}
-static void DumpChunks(const std::vector<ImageChunk>& chunks) {
- for (size_t i = 0; i < chunks.size(); ++i) {
- printf("chunk %zu: ", i);
- chunks[i].Dump();
+ CHECK_EQ(tgt_image.NumOfChunks(), patch_chunks.size());
+
+ android::base::unique_fd patch_fd(
+ open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
+ if (patch_fd == -1) {
+ printf("failed to open \"%s\": %s\n", patch_name.c_str(), strerror(errno));
+ return false;
}
+
+ return PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd);
}
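
For reference, WritePatchDataToFd() keeps the on-disk layout of the writer this change removes: the 8-byte "IMGDIFF2" magic, a 4-byte chunk count, the per-chunk headers, and then each non-raw chunk's bsdiff data in order. Below is a minimal sketch of checking that header, assuming the format's little-endian integers; ReadImgdiffHeader is a hypothetical helper, not part of this change.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <string>

static bool ReadImgdiffHeader(const std::string& patch_name, int32_t* num_chunks) {
  FILE* f = fopen(patch_name.c_str(), "rb");
  if (f == nullptr) return false;
  char magic[8];
  uint8_t count[4];
  // "IMGDIFF2" magic followed by the chunk count, matching the (removed) writer above.
  bool ok = fread(magic, 1, sizeof(magic), f) == sizeof(magic) &&
            memcmp(magic, "IMGDIFF2", sizeof(magic)) == 0 &&
            fread(count, 1, sizeof(count), f) == sizeof(count);
  if (ok) {
    *num_chunks = static_cast<int32_t>(static_cast<uint32_t>(count[0]) |
                                       (static_cast<uint32_t>(count[1]) << 8) |
                                       (static_cast<uint32_t>(count[2]) << 16) |
                                       (static_cast<uint32_t>(count[3]) << 24));
  }
  fclose(f);
  return ok;
}
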
int imgdiff(int argc, const char** argv) {
bool zip_mode = false;
-
- if (argc >= 2 && strcmp(argv[1], "-z") == 0) {
- zip_mode = true;
- --argc;
- ++argv;
- }
-
std::vector<uint8_t> bonus_data;
- if (argc >= 3 && strcmp(argv[1], "-b") == 0) {
- android::base::unique_fd fd(open(argv[2], O_RDONLY));
- if (fd == -1) {
- printf("failed to open bonus file %s: %s\n", argv[2], strerror(errno));
- return 1;
- }
- struct stat st;
- if (fstat(fd, &st) != 0) {
- printf("failed to stat bonus file %s: %s\n", argv[2], strerror(errno));
- return 1;
- }
+ size_t blocks_limit = 0;
+ std::string debug_dir;
- size_t bonus_size = st.st_size;
- bonus_data.resize(bonus_size);
- if (!android::base::ReadFully(fd, bonus_data.data(), bonus_size)) {
- printf("failed to read bonus file %s: %s\n", argv[2], strerror(errno));
- return 1;
- }
+ int opt;
+ int option_index;
+ optind = 1; // Reset the getopt state so that we can call it multiple times for test.
- argc -= 2;
- argv += 2;
+ while ((opt = getopt_long(argc, const_cast<char**>(argv), "zb:", OPTIONS, &option_index)) != -1) {
+ switch (opt) {
+ case 'z':
+ zip_mode = true;
+ break;
+ case 'b': {
+ android::base::unique_fd fd(open(optarg, O_RDONLY));
+ if (fd == -1) {
+ printf("failed to open bonus file %s: %s\n", optarg, strerror(errno));
+ return 1;
+ }
+ struct stat st;
+ if (fstat(fd, &st) != 0) {
+ printf("failed to stat bonus file %s: %s\n", optarg, strerror(errno));
+ return 1;
+ }
+
+ size_t bonus_size = st.st_size;
+ bonus_data.resize(bonus_size);
+ if (!android::base::ReadFully(fd, bonus_data.data(), bonus_size)) {
+ printf("failed to read bonus file %s: %s\n", optarg, strerror(errno));
+ return 1;
+ }
+ break;
+ }
+ case 0: {
+ std::string name = OPTIONS[option_index].name;
+ if (name == "block-limit" && !android::base::ParseUint(optarg, &blocks_limit)) {
+ printf("failed to parse size blocks_limit: %s\n", optarg);
+ return 1;
+ } else if (name == "debug-dir") {
+ debug_dir = optarg;
+ }
+ break;
+ }
+ default:
+ printf("unexpected opt: %s\n", optarg);
+ return 2;
+ }
}
- if (argc != 4) {
- printf("usage: %s [-z] [-b <bonus-file>] <src-img> <tgt-img> <patch-file>\n",
- argv[0]);
+ if (argc - optind != 3) {
+ printf("usage: %s [options] <src-img> <tgt-img> <patch-file>\n", argv[0]);
+    printf(
+        "  -z, Generate patches in zip mode; src and tgt should be zip files.\n"
+        "  -b <bonus-file>, Bonus file in addition to src; image mode only.\n"
+        "  --block-limit, For large zips, split the src and tgt based on the block limit, and\n"
+        "                 generate patches between each pair of pieces. Concatenate these\n"
+        "                 patches together and output them into <patch-file>.\n"
+        "  --debug-dir, Debug directory to put the split srcs and patches; zip mode only.\n");
return 2;
}
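
As a rough illustration of the options above, a test-style caller might invoke imgdiff() directly (the optind reset above is what makes repeated calls possible). File names and the block-limit value are placeholders; note that the block limit is given in blocks, since the code above multiplies it by BLOCK_SIZE.

const char* args[] = { "imgdiff", "-z", "--block-limit=102400",
                       "--debug-dir=/tmp/imgdiff_debug", "source.zip", "target.zip",
                       "target.patch" };
// Returns 1 on the failures above, 2 on a usage error, and 0 otherwise.
int ret = imgdiff(7, args);
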
- std::vector<ImageChunk> src_chunks;
- std::vector<ImageChunk> tgt_chunks;
- std::vector<uint8_t> src_file;
- std::vector<uint8_t> tgt_file;
-
if (zip_mode) {
- if (!ReadZip(argv[1], &src_chunks, &src_file, true)) {
- printf("failed to break apart source zip file\n");
+ ZipModeImage src_image(true, blocks_limit * BLOCK_SIZE);
+ ZipModeImage tgt_image(false, blocks_limit * BLOCK_SIZE);
+
+ if (!src_image.Initialize(argv[optind])) {
return 1;
}
- if (!ReadZip(argv[2], &tgt_chunks, &tgt_file, false)) {
- printf("failed to break apart target zip file\n");
+ if (!tgt_image.Initialize(argv[optind + 1])) {
+ return 1;
+ }
+
+ if (!ZipModeImage::CheckAndProcessChunks(&tgt_image, &src_image)) {
+ return 1;
+ }
+
+    // TODO: Save and output the split information so that the caller can create split transfer
+    // lists accordingly.
+
+ // Compute bsdiff patches for each chunk's data (the uncompressed data, in the case of
+ // deflate chunks).
+ if (blocks_limit > 0) {
+ std::vector<ZipModeImage> split_tgt_images;
+ std::vector<ZipModeImage> split_src_images;
+ std::vector<SortedRangeSet> split_src_ranges;
+ ZipModeImage::SplitZipModeImageWithLimit(tgt_image, src_image, &split_tgt_images,
+ &split_src_images, &split_src_ranges);
+
+ if (!ZipModeImage::GeneratePatches(split_tgt_images, split_src_images, split_src_ranges,
+ argv[optind + 2], debug_dir)) {
+ return 1;
+ }
+
+ } else if (!ZipModeImage::GeneratePatches(tgt_image, src_image, argv[optind + 2])) {
return 1;
}
} else {
- if (!ReadImage(argv[1], &src_chunks, &src_file)) {
- printf("failed to break apart source image\n");
+ ImageModeImage src_image(true);
+ ImageModeImage tgt_image(false);
+
+ if (!src_image.Initialize(argv[optind])) {
return 1;
}
- if (!ReadImage(argv[2], &tgt_chunks, &tgt_file)) {
- printf("failed to break apart target image\n");
+ if (!tgt_image.Initialize(argv[optind + 1])) {
return 1;
}
- // Verify that the source and target images have the same chunk
- // structure (ie, the same sequence of deflate and normal chunks).
-
- // Merge the gzip header and footer in with any adjacent normal chunks.
- MergeAdjacentNormalChunks(&tgt_chunks);
- MergeAdjacentNormalChunks(&src_chunks);
-
- if (src_chunks.size() != tgt_chunks.size()) {
- printf("source and target don't have same number of chunks!\n");
- printf("source chunks:\n");
- DumpChunks(src_chunks);
- printf("target chunks:\n");
- DumpChunks(tgt_chunks);
+ if (!ImageModeImage::CheckAndProcessChunks(&tgt_image, &src_image)) {
return 1;
}
- for (size_t i = 0; i < src_chunks.size(); ++i) {
- if (src_chunks[i].GetType() != tgt_chunks[i].GetType()) {
- printf("source and target don't have same chunk structure! (chunk %zu)\n", i);
- printf("source chunks:\n");
- DumpChunks(src_chunks);
- printf("target chunks:\n");
- DumpChunks(tgt_chunks);
- return 1;
- }
- }
- }
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- if (tgt_chunks[i].GetType() == CHUNK_DEFLATE) {
- // Confirm that given the uncompressed chunk data in the target, we
- // can recompress it and get exactly the same bits as are in the
- // input target image. If this fails, treat the chunk as a normal
- // non-deflated chunk.
- if (!tgt_chunks[i].ReconstructDeflateChunk()) {
- printf("failed to reconstruct target deflate chunk %zu [%s]; treating as normal\n", i,
- tgt_chunks[i].GetEntryName().c_str());
- tgt_chunks[i].ChangeDeflateChunkToNormal();
- if (zip_mode) {
- ImageChunk* src = FindChunkByName(tgt_chunks[i].GetEntryName(), src_chunks);
- if (src != nullptr) {
- src->ChangeDeflateChunkToNormal();
- }
- } else {
- src_chunks[i].ChangeDeflateChunkToNormal();
- }
- continue;
- }
-
- // If two deflate chunks are identical (eg, the kernel has not
- // changed between two builds), treat them as normal chunks.
- // This makes applypatch much faster -- it can apply a trivial
- // patch to the compressed data, rather than uncompressing and
- // recompressing to apply the trivial patch to the uncompressed
- // data.
- ImageChunk* src;
- if (zip_mode) {
- src = FindChunkByName(tgt_chunks[i].GetEntryName(), src_chunks);
- } else {
- src = &src_chunks[i];
- }
-
- if (src == nullptr) {
- tgt_chunks[i].ChangeDeflateChunkToNormal();
- } else if (tgt_chunks[i] == *src) {
- tgt_chunks[i].ChangeDeflateChunkToNormal();
- src->ChangeDeflateChunkToNormal();
- }
- }
- }
-
- // Merging neighboring normal chunks.
- if (zip_mode) {
- // For zips, we only need to do this to the target: deflated
- // chunks are matched via filename, and normal chunks are patched
- // using the entire source file as the source.
- MergeAdjacentNormalChunks(&tgt_chunks);
-
- } else {
- // For images, we need to maintain the parallel structure of the
- // chunk lists, so do the merging in both the source and target
- // lists.
- MergeAdjacentNormalChunks(&tgt_chunks);
- MergeAdjacentNormalChunks(&src_chunks);
- if (src_chunks.size() != tgt_chunks.size()) {
- // This shouldn't happen.
- printf("merging normal chunks went awry\n");
+ if (!bonus_data.empty() && !src_image.SetBonusData(bonus_data)) {
return 1;
}
- }
- // Compute bsdiff patches for each chunk's data (the uncompressed
- // data, in the case of deflate chunks).
-
- DumpChunks(src_chunks);
-
- printf("Construct patches for %zu chunks...\n", tgt_chunks.size());
- std::vector<std::vector<uint8_t>> patch_data(tgt_chunks.size());
- saidx_t* bsdiff_cache = nullptr;
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- if (zip_mode) {
- ImageChunk* src;
- if (tgt_chunks[i].GetType() == CHUNK_DEFLATE &&
- (src = FindChunkByName(tgt_chunks[i].GetEntryName(), src_chunks))) {
- if (!MakePatch(src, &tgt_chunks[i], &patch_data[i], nullptr)) {
- printf("Failed to generate patch for target chunk %zu: ", i);
- return 1;
- }
- } else {
- if (!MakePatch(&src_chunks[0], &tgt_chunks[i], &patch_data[i], &bsdiff_cache)) {
- printf("Failed to generate patch for target chunk %zu: ", i);
- return 1;
- }
- }
- } else {
- if (i == 1 && !bonus_data.empty()) {
- printf(" using %zu bytes of bonus data for chunk %zu\n", bonus_data.size(), i);
- src_chunks[i].SetBonusData(bonus_data);
- }
-
- if (!MakePatch(&src_chunks[i], &tgt_chunks[i], &patch_data[i], nullptr)) {
- printf("Failed to generate patch for target chunk %zu: ", i);
- return 1;
- }
- }
- printf("patch %3zu is %zu bytes (of %zu)\n", i, patch_data[i].size(),
- src_chunks[i].GetRawDataLength());
- }
-
- if (bsdiff_cache != nullptr) {
- free(bsdiff_cache);
- }
-
- // Figure out how big the imgdiff file header is going to be, so
- // that we can correctly compute the offset of each bsdiff patch
- // within the file.
-
- size_t total_header_size = 12;
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- total_header_size += tgt_chunks[i].GetHeaderSize(patch_data[i].size());
- }
-
- size_t offset = total_header_size;
-
- android::base::unique_fd patch_fd(open(argv[3], O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
- if (patch_fd == -1) {
- printf("failed to open \"%s\": %s\n", argv[3], strerror(errno));
- return 1;
- }
-
- // Write out the headers.
- if (!android::base::WriteStringToFd("IMGDIFF2", patch_fd)) {
- printf("failed to write \"IMGDIFF2\" to \"%s\": %s\n", argv[3], strerror(errno));
- return 1;
- }
- Write4(patch_fd, static_cast<int32_t>(tgt_chunks.size()));
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- printf("chunk %zu: ", i);
- offset = tgt_chunks[i].WriteHeaderToFd(patch_fd, patch_data[i], offset);
- }
-
- // Append each chunk's bsdiff patch, in order.
- for (size_t i = 0; i < tgt_chunks.size(); ++i) {
- if (tgt_chunks[i].GetType() != CHUNK_RAW) {
- if (!android::base::WriteFully(patch_fd, patch_data[i].data(), patch_data[i].size())) {
- CHECK(false) << "failed to write " << patch_data[i].size() << " bytes patch for chunk "
- << i;
- }
+ if (!ImageModeImage::GeneratePatches(tgt_image, src_image, argv[optind + 2])) {
+ return 1;
}
}
diff --git a/applypatch/include/applypatch/imgdiff_image.h b/applypatch/include/applypatch/imgdiff_image.h
new file mode 100644
index 0000000..9fb844b
--- /dev/null
+++ b/applypatch/include/applypatch/imgdiff_image.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _APPLYPATCH_IMGDIFF_IMAGE_H
+#define _APPLYPATCH_IMGDIFF_IMAGE_H
+
+#include <stddef.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+#include <string>
+#include <vector>
+
+#include <bsdiff.h>
+#include <ziparchive/zip_archive.h>
+#include <zlib.h>
+
+#include "imgdiff.h"
+#include "rangeset.h"
+
+class ImageChunk {
+ public:
+ static constexpr auto WINDOWBITS = -15; // 32kb window; negative to indicate a raw stream.
+ static constexpr auto MEMLEVEL = 8; // the default value.
+ static constexpr auto METHOD = Z_DEFLATED;
+ static constexpr auto STRATEGY = Z_DEFAULT_STRATEGY;
+
+ ImageChunk(int type, size_t start, const std::vector<uint8_t>* file_content, size_t raw_data_len,
+ std::string entry_name = {});
+
+ int GetType() const {
+ return type_;
+ }
+ size_t GetRawDataLength() const {
+ return raw_data_len_;
+ }
+ const std::string& GetEntryName() const {
+ return entry_name_;
+ }
+ size_t GetStartOffset() const {
+ return start_;
+ }
+ int GetCompressLevel() const {
+ return compress_level_;
+ }
+
+ // CHUNK_DEFLATE will return the uncompressed data for diff, while other types will simply return
+ // the raw data.
+ const uint8_t* DataForPatch() const;
+ size_t DataLengthForPatch() const;
+
+ void Dump() const {
+ printf("type: %d, start: %zu, len: %zu, name: %s\n", type_, start_, DataLengthForPatch(),
+ entry_name_.c_str());
+ }
+
+ void SetUncompressedData(std::vector<uint8_t> data);
+ bool SetBonusData(const std::vector<uint8_t>& bonus_data);
+
+ bool operator==(const ImageChunk& other) const;
+ bool operator!=(const ImageChunk& other) const {
+ return !(*this == other);
+ }
+
+ /*
+ * Cause a gzip chunk to be treated as a normal chunk (ie, as a blob of uninterpreted data).
+ * The resulting patch will likely be about as big as the target file, but it lets us handle
+ * the case of images where some gzip chunks are reconstructible but others aren't (by treating
+ * the ones that aren't as normal chunks).
+ */
+ void ChangeDeflateChunkToNormal();
+
+ /*
+ * Verify that we can reproduce exactly the same compressed data that we started with. Sets the
+ * level, method, windowBits, memLevel, and strategy fields in the chunk to the encoding
+ * parameters needed to produce the right output.
+ */
+ bool ReconstructDeflateChunk();
+ bool IsAdjacentNormal(const ImageChunk& other) const;
+ void MergeAdjacentNormal(const ImageChunk& other);
+
+  /*
+   * Compute a bsdiff patch between |src| and |tgt|, and store the result in |patch_data|.
+   * |bsdiff_cache| can be used to cache the suffix array if the same |src| chunk is used
+   * repeatedly; pass nullptr if not needed. (See the sketch following this class for a typical
+   * use.)
+   */
+ static bool MakePatch(const ImageChunk& tgt, const ImageChunk& src,
+ std::vector<uint8_t>* patch_data, saidx_t** bsdiff_cache);
+
+ private:
+ const uint8_t* GetRawData() const;
+ bool TryReconstruction(int level);
+
+ int type_; // CHUNK_NORMAL, CHUNK_DEFLATE, CHUNK_RAW
+ size_t start_; // offset of chunk in the original input file
+ const std::vector<uint8_t>* input_file_ptr_; // ptr to the full content of original input file
+ size_t raw_data_len_;
+
+ // deflate encoder parameters
+ int compress_level_;
+
+ // --- for CHUNK_DEFLATE chunks only: ---
+ std::vector<uint8_t> uncompressed_data_;
+ std::string entry_name_; // used for zip entries
+};
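
A minimal sketch of MakePatch() with a shared suffix-array cache, as referenced in the comment above: when several target chunks are diffed against the same source chunk (for example the whole-file pseudo source in zip mode), reusing |bsdiff_cache| avoids recomputing the suffix array. DiffAgainstOneSource is a hypothetical helper, not part of this header, and assumes the declarations above are in scope.

#include <stdlib.h>

#include <vector>

static bool DiffAgainstOneSource(const std::vector<ImageChunk>& tgt_chunks, const ImageChunk& src,
                                 std::vector<std::vector<uint8_t>>* patches) {
  saidx_t* bsdiff_cache = nullptr;  // shared across calls; owned by this function
  patches->resize(tgt_chunks.size());
  bool ok = true;
  for (size_t i = 0; i < tgt_chunks.size() && ok; ++i) {
    ok = ImageChunk::MakePatch(tgt_chunks[i], src, &(*patches)[i], &bsdiff_cache);
  }
  free(bsdiff_cache);  // the previous imgdiff code released the cache with free() as well
  return ok;
}
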
+
+// PatchChunk stores the patch data between a source chunk and a target chunk. It also keeps
+// track of the metadata of the source and target chunks (e.g. offset, raw data length,
+// uncompressed data length).
+class PatchChunk {
+ public:
+ PatchChunk(const ImageChunk& tgt, const ImageChunk& src, std::vector<uint8_t> data);
+
+ // Construct a CHUNK_RAW patch from the target data directly.
+ explicit PatchChunk(const ImageChunk& tgt);
+
+ // Return true if raw data size is smaller than the patch size.
+ static bool RawDataIsSmaller(const ImageChunk& tgt, size_t patch_size);
+
+ // Update the source start with the new offset within the source range.
+ void UpdateSourceOffset(const SortedRangeSet& src_range);
+
+ static bool WritePatchDataToFd(const std::vector<PatchChunk>& patch_chunks, int patch_fd);
+
+ private:
+ size_t GetHeaderSize() const;
+ size_t WriteHeaderToFd(int fd, size_t offset) const;
+
+  // The patch chunk type is the same as the target chunk type, except that we change |type_| to
+  // CHUNK_RAW if the target's raw data is smaller than the patch.
+ int type_;
+
+ size_t source_start_;
+ size_t source_len_;
+ size_t source_uncompressed_len_;
+
+ size_t target_start_; // offset of the target chunk within the target file
+ size_t target_len_;
+ size_t target_uncompressed_len_;
+ size_t target_compress_level_; // the deflate compression level of the target chunk.
+
+ std::vector<uint8_t> data_; // storage for the patch data
+};
+
+// Interface for zip_mode and image_mode images. We initialize the image from an input file and
+// split the file content into a list of image chunks.
+class Image {
+ public:
+ explicit Image(bool is_source) : is_source_(is_source) {}
+
+ virtual ~Image() {}
+
+ // Create a list of image chunks from input file.
+ virtual bool Initialize(const std::string& filename) = 0;
+
+ // Look for runs of adjacent normal chunks and compress them down into a single chunk. (Such
+ // runs can be produced when deflate chunks are changed to normal chunks.)
+ void MergeAdjacentNormalChunks();
+
+ void DumpChunks() const;
+
+ // Non const iterators to access the stored ImageChunks.
+ std::vector<ImageChunk>::iterator begin() {
+ return chunks_.begin();
+ }
+
+ std::vector<ImageChunk>::iterator end() {
+ return chunks_.end();
+ }
+
+ std::vector<ImageChunk>::const_iterator cbegin() const {
+ return chunks_.cbegin();
+ }
+
+ std::vector<ImageChunk>::const_iterator cend() const {
+ return chunks_.cend();
+ }
+
+ ImageChunk& operator[](size_t i);
+ const ImageChunk& operator[](size_t i) const;
+
+ size_t NumOfChunks() const {
+ return chunks_.size();
+ }
+
+ protected:
+ bool ReadFile(const std::string& filename, std::vector<uint8_t>* file_content);
+
+ bool is_source_; // True if it's for source chunks.
+ std::vector<ImageChunk> chunks_; // Internal storage of ImageChunk.
+ std::vector<uint8_t> file_content_; // Store the whole input file in memory.
+};
+
+class ZipModeImage : public Image {
+ public:
+ explicit ZipModeImage(bool is_source, size_t limit = 0) : Image(is_source), limit_(limit) {}
+
+ bool Initialize(const std::string& filename) override;
+
+  // Initialize a dummy ZipModeImage from an existing ImageChunk vector. For the src image pieces,
+  // we reconstruct a new file_content based on the source ranges. This isn't needed for the tgt
+  // image pieces, because for each chunk both the data and its offset within the file are
+  // unchanged.
+ void Initialize(const std::vector<ImageChunk>& chunks, const std::vector<uint8_t>& file_content) {
+ chunks_ = chunks;
+ file_content_ = file_content;
+ }
+
+  // The pseudo source chunk for bsdiff when there's no match for the given target chunk; it is in
+  // fact the whole source file.
+ ImageChunk PseudoSource() const;
+
+ // Find the matching deflate source chunk by entry name. Search for normal chunks also if
+ // |find_normal| is true.
+ ImageChunk* FindChunkByName(const std::string& name, bool find_normal = false);
+
+ const ImageChunk* FindChunkByName(const std::string& name, bool find_normal = false) const;
+
+ // Verify that we can reconstruct the deflate chunks; also change the type to CHUNK_NORMAL if
+ // src and tgt are identical.
+ static bool CheckAndProcessChunks(ZipModeImage* tgt_image, ZipModeImage* src_image);
+
+ // Compute the patch between tgt & src images, and write the data into |patch_name|.
+ static bool GeneratePatches(const ZipModeImage& tgt_image, const ZipModeImage& src_image,
+ const std::string& patch_name);
+
+ // Compute the patch based on the lists of split src and tgt images. Generate patches for each
+ // pair of split pieces and write the data to |patch_name|. If |debug_dir| is specified, write
+ // each split src data and patch data into that directory.
+ static bool GeneratePatches(const std::vector<ZipModeImage>& split_tgt_images,
+ const std::vector<ZipModeImage>& split_src_images,
+ const std::vector<SortedRangeSet>& split_src_ranges,
+ const std::string& patch_name, const std::string& debug_dir);
+
+ // Split the tgt chunks and src chunks based on the size limit.
+ static bool SplitZipModeImageWithLimit(const ZipModeImage& tgt_image,
+ const ZipModeImage& src_image,
+ std::vector<ZipModeImage>* split_tgt_images,
+ std::vector<ZipModeImage>* split_src_images,
+ std::vector<SortedRangeSet>* split_src_ranges);
+
+ private:
+ // Initialize image chunks based on the zip entries.
+ bool InitializeChunks(const std::string& filename, ZipArchiveHandle handle);
+  // Add a zip entry to the list.
+ bool AddZipEntryToChunks(ZipArchiveHandle handle, const std::string& entry_name, ZipEntry* entry);
+  // Return the real size of the zip file, omitting the trailing zeros used for alignment.
+ bool GetZipFileSize(size_t* input_file_size);
+
+ static void ValidateSplitImages(const std::vector<ZipModeImage>& split_tgt_images,
+ const std::vector<ZipModeImage>& split_src_images,
+ std::vector<SortedRangeSet>& split_src_ranges,
+ size_t total_tgt_size);
+  // Construct the dummy split images based on the chunk info and source ranges, and move them
+  // into the given vectors.
+ static void AddSplitImageFromChunkList(const ZipModeImage& tgt_image,
+ const ZipModeImage& src_image,
+ const SortedRangeSet& split_src_ranges,
+ const std::vector<ImageChunk>& split_tgt_chunks,
+ const std::vector<ImageChunk>& split_src_chunks,
+ std::vector<ZipModeImage>* split_tgt_images,
+ std::vector<ZipModeImage>* split_src_images);
+
+ // Function that actually iterates the tgt_chunks and makes patches.
+ static bool GeneratePatchesInternal(const ZipModeImage& tgt_image, const ZipModeImage& src_image,
+ std::vector<PatchChunk>* patch_chunks);
+
+  // Size limit in bytes of each chunk. Also, if the length of one zip entry exceeds the limit,
+  // we'll split that entry into several smaller chunks in advance.
+ size_t limit_;
+};
+
+class ImageModeImage : public Image {
+ public:
+ explicit ImageModeImage(bool is_source) : Image(is_source) {}
+
+ // Initialize the image chunks list by searching the magic numbers in an image file.
+ bool Initialize(const std::string& filename) override;
+
+ bool SetBonusData(const std::vector<uint8_t>& bonus_data);
+
+ // In Image Mode, verify that the source and target images have the same chunk structure (ie, the
+ // same sequence of deflate and normal chunks).
+ static bool CheckAndProcessChunks(ImageModeImage* tgt_image, ImageModeImage* src_image);
+
+ // In image mode, generate patches against the given source chunks and bonus_data; write the
+ // result to |patch_name|.
+ static bool GeneratePatches(const ImageModeImage& tgt_image, const ImageModeImage& src_image,
+ const std::string& patch_name);
+};
+
+#endif // _APPLYPATCH_IMGDIFF_IMAGE_H
diff --git a/bootloader_message/Android.bp b/bootloader_message/Android.bp
index f0d76e7..456b04c 100644
--- a/bootloader_message/Android.bp
+++ b/bootloader_message/Android.bp
@@ -17,7 +17,10 @@
cc_library_static {
name: "libbootloader_message",
srcs: ["bootloader_message.cpp"],
- cppflags: ["-Werror"],
+ cppflags: [
+ "-Wall",
+ "-Werror",
+ ],
static_libs: [
"libbase",
"libfs_mgr",
diff --git a/edify/Android.mk b/edify/Android.mk
index d8058c1..baf4dd2 100644
--- a/edify/Android.mk
+++ b/edify/Android.mk
@@ -28,13 +28,12 @@
$(edify_src_files) \
edify_parser.cpp
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_CPPFLAGS := -g -O0
LOCAL_MODULE := edify_parser
LOCAL_YACCFLAGS := -v
LOCAL_CPPFLAGS += -Wno-unused-parameter
LOCAL_CPPFLAGS += -Wno-deprecated-register
-LOCAL_CLANG := true
LOCAL_C_INCLUDES += $(LOCAL_PATH)/..
LOCAL_STATIC_LIBRARIES += libbase
@@ -47,11 +46,10 @@
LOCAL_SRC_FILES := $(edify_src_files)
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_CPPFLAGS := -Wno-unused-parameter
LOCAL_CPPFLAGS += -Wno-deprecated-register
LOCAL_MODULE := libedify
-LOCAL_CLANG := true
LOCAL_C_INCLUDES += $(LOCAL_PATH)/..
LOCAL_STATIC_LIBRARIES += libbase
diff --git a/edify/lexer.ll b/edify/lexer.ll
index b764d16..cb45943 100644
--- a/edify/lexer.ll
+++ b/edify/lexer.ll
@@ -35,6 +35,8 @@
%x STR
+%option noinput
+%option nounput
%option noyywrap
%%
diff --git a/error_code.h b/error_code.h
index 9fe047c..4e3032b 100644
--- a/error_code.h
+++ b/error_code.h
@@ -25,6 +25,9 @@
kBootreasonInBlacklist,
kPackageCompatibilityFailure,
kScriptExecutionFailure,
+ kMapFileFailure,
+ kForkUpdateBinaryFailure,
+ kUpdateBinaryCommandFailure,
};
enum CauseCode {
@@ -68,6 +71,8 @@
kUncryptFileCloseError,
kUncryptFileRenameError,
kUncryptPackageMissingError,
+ kUncryptRealpathFindError,
+ kUncryptBlockDeviceFindError,
};
#endif // _ERROR_CODE_H_
diff --git a/install.cpp b/install.cpp
index 7fbf5c0..586dbbe 100644
--- a/install.cpp
+++ b/install.cpp
@@ -148,13 +148,23 @@
return INSTALL_ERROR;
}
- // We allow the package to not have any serialno, but if it has a non-empty
- // value it should match.
+  // We allow the package to not have any serialno, but we also allow it to carry multiple serial
+  // numbers separated by "|", e.g. serialno=serialno1|serialno2|serialno3 ... Verification fails
+  // if the device's serialno doesn't match any of these numbers.
value = android::base::GetProperty("ro.serialno", "");
const std::string& pkg_serial_no = metadata["serialno"];
- if (!pkg_serial_no.empty() && pkg_serial_no != value) {
- LOG(ERROR) << "Package is for serial " << pkg_serial_no;
- return INSTALL_ERROR;
+ if (!pkg_serial_no.empty()) {
+ bool match = false;
+ for (const std::string& number : android::base::Split(pkg_serial_no, "|")) {
+ if (value == android::base::Trim(number)) {
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ LOG(ERROR) << "Package is for serial " << pkg_serial_no;
+ return INSTALL_ERROR;
+ }
}
if (metadata["ota-type"] != "AB") {
@@ -321,6 +331,7 @@
if (ret) {
close(pipefd[0]);
close(pipefd[1]);
+ log_buffer->push_back(android::base::StringPrintf("error: %d", kUpdateBinaryCommandFailure));
return ret;
}
@@ -385,6 +396,7 @@
close(pipefd[0]);
close(pipefd[1]);
PLOG(ERROR) << "Failed to fork update binary";
+ log_buffer->push_back(android::base::StringPrintf("error: %d", kForkUpdateBinaryFailure));
return INSTALL_ERROR;
}
@@ -573,6 +585,7 @@
MemMapping map;
if (!map.MapFile(path)) {
LOG(ERROR) << "failed to map file";
+ log_buffer->push_back(android::base::StringPrintf("error: %d", kMapFileFailure));
return INSTALL_CORRUPT;
}
diff --git a/minadbd/Android.mk b/minadbd/Android.mk
index de0b0c8..8d86fd6 100644
--- a/minadbd/Android.mk
+++ b/minadbd/Android.mk
@@ -15,7 +15,6 @@
minadbd.cpp \
minadbd_services.cpp \
-LOCAL_CLANG := true
LOCAL_MODULE := libminadbd
LOCAL_CFLAGS := $(minadbd_cflags)
LOCAL_CONLY_FLAGS := -Wimplicit-function-declaration
@@ -27,7 +26,6 @@
include $(CLEAR_VARS)
-LOCAL_CLANG := true
LOCAL_MODULE := minadbd_test
LOCAL_COMPATIBILITY_SUITE := device-tests
LOCAL_SRC_FILES := fuse_adb_provider_test.cpp
diff --git a/minui/Android.mk b/minui/Android.mk
index 6522fcf..1abcf0a 100644
--- a/minui/Android.mk
+++ b/minui/Android.mk
@@ -32,7 +32,7 @@
libpng \
libbase
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
@@ -68,7 +68,7 @@
libpng \
libbase
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
include $(BUILD_SHARED_LIBRARY)
diff --git a/otafault/Android.mk b/otafault/Android.mk
index ec4cdb3..4784d56 100644
--- a/otafault/Android.mk
+++ b/otafault/Android.mk
@@ -24,6 +24,7 @@
liblog
LOCAL_CFLAGS := \
+ -Wall \
-Werror \
-Wthread-safety \
-Wthread-safety-negative \
@@ -32,7 +33,6 @@
LOCAL_SRC_FILES := config.cpp ota_io.cpp
LOCAL_MODULE_TAGS := eng
LOCAL_MODULE := libotafault
-LOCAL_CLANG := true
LOCAL_C_INCLUDES := bootable/recovery
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
LOCAL_WHOLE_STATIC_LIBRARIES := $(otafault_static_libs)
@@ -47,7 +47,7 @@
LOCAL_MODULE_TAGS := tests
LOCAL_MODULE := otafault_test
LOCAL_STATIC_LIBRARIES := $(otafault_static_libs)
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_C_INCLUDES := bootable/recovery
LOCAL_FORCE_STATIC_EXECUTABLE := true
diff --git a/otafault/config.cpp b/otafault/config.cpp
index 8590833..b94e429 100644
--- a/otafault/config.cpp
+++ b/otafault/config.cpp
@@ -69,7 +69,9 @@
fname.resize(OTAIO_MAX_FNAME_SIZE);
ZipString zip_type_path(type_path.c_str());
ZipEntry entry;
- int status = FindEntry(archive, zip_type_path, &entry);
+ if (FindEntry(archive, zip_type_path, &entry) != 0) {
+ return {};
+ }
ExtractToMemory(archive, &entry, reinterpret_cast<uint8_t*>(&fname[0]), OTAIO_MAX_FNAME_SIZE);
return fname;
}
diff --git a/otautil/DirUtil.cpp b/otautil/DirUtil.cpp
index e08e360..fffc822 100644
--- a/otautil/DirUtil.cpp
+++ b/otautil/DirUtil.cpp
@@ -16,203 +16,101 @@
#include "DirUtil.h"
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <errno.h>
#include <dirent.h>
-#include <limits.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
#include <string>
#include <selinux/label.h>
#include <selinux/selinux.h>
-typedef enum { DMISSING, DDIR, DILLEGAL } DirStatus;
+enum class DirStatus { DMISSING, DDIR, DILLEGAL };
-static DirStatus
-getPathDirStatus(const char *path)
-{
- struct stat st;
- int err;
-
- err = stat(path, &st);
- if (err == 0) {
- /* Something's there; make sure it's a directory.
- */
- if (S_ISDIR(st.st_mode)) {
- return DDIR;
- }
- errno = ENOTDIR;
- return DILLEGAL;
- } else if (errno != ENOENT) {
- /* Something went wrong, or something in the path
- * is bad. Can't do anything in this situation.
- */
- return DILLEGAL;
+static DirStatus dir_status(const std::string& path) {
+ struct stat sb;
+ if (stat(path.c_str(), &sb) == 0) {
+ // Something's there; make sure it's a directory.
+ if (S_ISDIR(sb.st_mode)) {
+ return DirStatus::DDIR;
}
- return DMISSING;
+ errno = ENOTDIR;
+ return DirStatus::DILLEGAL;
+ } else if (errno != ENOENT) {
+ // Something went wrong, or something in the path is bad. Can't do anything in this situation.
+ return DirStatus::DILLEGAL;
+ }
+ return DirStatus::DMISSING;
}
-int
-dirCreateHierarchy(const char *path, int mode,
- const struct utimbuf *timestamp, bool stripFileName,
- struct selabel_handle *sehnd)
-{
- DirStatus ds;
+int mkdir_recursively(const std::string& input_path, mode_t mode, bool strip_filename,
+ const selabel_handle* sehnd) {
+ // Check for an empty string before we bother making any syscalls.
+ if (input_path.empty()) {
+ errno = ENOENT;
+ return -1;
+ }
- /* Check for an empty string before we bother
- * making any syscalls.
- */
- if (path[0] == '\0') {
- errno = ENOENT;
- return -1;
+ // Allocate a path that we can modify; stick a slash on the end to make things easier.
+ std::string path = input_path;
+ if (strip_filename) {
+ // Strip everything after the last slash.
+ size_t pos = path.rfind('/');
+ if (pos == std::string::npos) {
+ errno = ENOENT;
+ return -1;
}
- // Allocate a path that we can modify; stick a slash on
- // the end to make things easier.
- std::string cpath = path;
- if (stripFileName) {
- // Strip everything after the last slash.
- size_t pos = cpath.rfind('/');
- if (pos == std::string::npos) {
- errno = ENOENT;
- return -1;
- }
- cpath.resize(pos + 1);
- } else {
- // Make sure that the path ends in a slash.
- cpath.push_back('/');
- }
+ path.resize(pos + 1);
+ } else {
+ // Make sure that the path ends in a slash.
+ path.push_back('/');
+ }
- /* See if it already exists.
- */
- ds = getPathDirStatus(cpath.c_str());
- if (ds == DDIR) {
- return 0;
- } else if (ds == DILLEGAL) {
- return -1;
- }
-
- /* Walk up the path from the root and make each level.
- * If a directory already exists, no big deal.
- */
- const char *path_start = &cpath[0];
- char *p = &cpath[0];
- while (*p != '\0') {
- /* Skip any slashes, watching out for the end of the string.
- */
- while (*p != '\0' && *p == '/') {
- p++;
- }
- if (*p == '\0') {
- break;
- }
-
- /* Find the end of the next path component.
- * We know that we'll see a slash before the NUL,
- * because we added it, above.
- */
- while (*p != '/') {
- p++;
- }
- *p = '\0';
-
- /* Check this part of the path and make a new directory
- * if necessary.
- */
- ds = getPathDirStatus(path_start);
- if (ds == DILLEGAL) {
- /* Could happen if some other process/thread is
- * messing with the filesystem.
- */
- return -1;
- } else if (ds == DMISSING) {
- int err;
-
- char *secontext = NULL;
-
- if (sehnd) {
- selabel_lookup(sehnd, &secontext, path_start, mode);
- setfscreatecon(secontext);
- }
-
- err = mkdir(path_start, mode);
-
- if (secontext) {
- freecon(secontext);
- setfscreatecon(NULL);
- }
-
- if (err != 0) {
- return -1;
- }
- if (timestamp != NULL && utime(path_start, timestamp)) {
- return -1;
- }
- }
- // else, this directory already exists.
-
- // Repair the path and continue.
- *p = '/';
- }
+ // See if it already exists.
+ DirStatus ds = dir_status(path);
+ if (ds == DirStatus::DDIR) {
return 0;
-}
+ } else if (ds == DirStatus::DILLEGAL) {
+ return -1;
+ }
-int
-dirUnlinkHierarchy(const char *path)
-{
- struct stat st;
- DIR *dir;
- struct dirent *de;
- int fail = 0;
-
- /* is it a file or directory? */
- if (lstat(path, &st) < 0) {
+ // Walk up the path from the root and make each level.
+ size_t prev_end = 0;
+ while (prev_end < path.size()) {
+ size_t next_end = path.find('/', prev_end + 1);
+ if (next_end == std::string::npos) {
+ break;
+ }
+ std::string dir_path = path.substr(0, next_end);
+ // Check this part of the path and make a new directory if necessary.
+ switch (dir_status(dir_path)) {
+ case DirStatus::DILLEGAL:
+ // Could happen if some other process/thread is messing with the filesystem.
return -1;
- }
-
- /* a file, so unlink it */
- if (!S_ISDIR(st.st_mode)) {
- return unlink(path);
- }
-
- /* a directory, so open handle */
- dir = opendir(path);
- if (dir == NULL) {
- return -1;
- }
-
- /* recurse over components */
- errno = 0;
- while ((de = readdir(dir)) != NULL) {
- //TODO: don't blow the stack
- char dn[PATH_MAX];
- if (!strcmp(de->d_name, "..") || !strcmp(de->d_name, ".")) {
- continue;
+ case DirStatus::DMISSING: {
+ char* secontext = nullptr;
+ if (sehnd) {
+ selabel_lookup(const_cast<selabel_handle*>(sehnd), &secontext, dir_path.c_str(), mode);
+ setfscreatecon(secontext);
}
- snprintf(dn, sizeof(dn), "%s/%s", path, de->d_name);
- if (dirUnlinkHierarchy(dn) < 0) {
- fail = 1;
- break;
+ int err = mkdir(dir_path.c_str(), mode);
+ if (secontext) {
+ freecon(secontext);
+ setfscreatecon(nullptr);
}
- errno = 0;
+ if (err != 0) {
+ return -1;
+ }
+ break;
+ }
+ default:
+ // Already exists.
+ break;
}
- /* in case readdir or unlink_recursive failed */
- if (fail || errno < 0) {
- int save = errno;
- closedir(dir);
- errno = save;
- return -1;
- }
-
- /* close directory handle */
- if (closedir(dir) < 0) {
- return -1;
- }
-
- /* delete target directory */
- return rmdir(path);
+ prev_end = next_end;
+ }
+ return 0;
}
diff --git a/otautil/DirUtil.h b/otautil/DirUtil.h
index 85b83c3..85d6c16 100644
--- a/otautil/DirUtil.h
+++ b/otautil/DirUtil.h
@@ -14,41 +14,26 @@
* limitations under the License.
*/
-#ifndef MINZIP_DIRUTIL_H_
-#define MINZIP_DIRUTIL_H_
+#ifndef OTAUTIL_DIRUTIL_H_
+#define OTAUTIL_DIRUTIL_H_
-#include <stdbool.h>
-#include <utime.h>
+#include <sys/stat.h> // mode_t
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include <string>
struct selabel_handle;
-/* Like "mkdir -p", try to guarantee that all directories
- * specified in path are present, creating as many directories
- * as necessary. The specified mode is passed to all mkdir
- * calls; no modifications are made to umask.
- *
- * If stripFileName is set, everything after the final '/'
- * is stripped before creating the directory hierarchy.
- *
- * If timestamp is non-NULL, new directories will be timestamped accordingly.
- *
- * Returns 0 on success; returns -1 (and sets errno) on failure
- * (usually if some element of path is not a directory).
- */
-int dirCreateHierarchy(const char *path, int mode,
- const struct utimbuf *timestamp, bool stripFileName,
- struct selabel_handle* sehnd);
+// Like "mkdir -p", try to guarantee that all directories specified in path are present, creating as
+// many directories as necessary. The specified mode is passed to all mkdir calls; no modifications
+// are made to umask.
+//
+// If strip_filename is set, everything after the final '/' is stripped before creating the
+// directory hierarchy.
+//
+// Returns 0 on success; returns -1 (and sets errno) on failure (usually if some element of path is
+// not a directory).
+int mkdir_recursively(const std::string& path, mode_t mode, bool strip_filename,
+ const struct selabel_handle* sehnd);
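
A minimal usage sketch of the declaration above, mirroring the call sites this change introduces in recovery.cpp. The paths are placeholders, sehandle stands for whatever selabel_handle the caller already holds, and passing a null handle simply skips labeling.

// Create the parent directories of a log file (strip_filename = true) ...
if (mkdir_recursively("/cache/recovery/last_log", 0777, true, sehandle) != 0) {
  // errno is set, e.g. to ENOTDIR if some element of the path is not a directory.
}
// ... or create the directory itself (strip_filename = false).
mkdir_recursively("/cache/recovery", 0777, false, sehandle);
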
-/* rm -rf <path>
- */
-int dirUnlinkHierarchy(const char *path);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // MINZIP_DIRUTIL_H_
+#endif // OTAUTIL_DIRUTIL_H_
diff --git a/print_sha1.h b/print_sha1.h
index 1f85895..d0c18b3 100644
--- a/print_sha1.h
+++ b/print_sha1.h
@@ -32,15 +32,15 @@
return result;
}
-static std::string print_sha1(const uint8_t sha1[SHA_DIGEST_LENGTH]) {
+[[maybe_unused]] static std::string print_sha1(const uint8_t sha1[SHA_DIGEST_LENGTH]) {
return print_sha1(sha1, SHA_DIGEST_LENGTH);
}
-static std::string short_sha1(const uint8_t sha1[SHA_DIGEST_LENGTH]) {
+[[maybe_unused]] static std::string short_sha1(const uint8_t sha1[SHA_DIGEST_LENGTH]) {
return print_sha1(sha1, 4);
}
-static std::string print_hex(const uint8_t* bytes, size_t len) {
+[[maybe_unused]] static std::string print_hex(const uint8_t* bytes, size_t len) {
return print_sha1(bytes, len);
}
diff --git a/rangeset.h b/rangeset.h
new file mode 100644
index 0000000..f224a08
--- /dev/null
+++ b/rangeset.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <iterator>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android-base/logging.h>
+#include <android-base/parseint.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+
+using Range = std::pair<size_t, size_t>;
+
+class RangeSet {
+ public:
+ RangeSet() : blocks_(0) {}
+
+ explicit RangeSet(std::vector<Range>&& pairs) {
+ CHECK_NE(pairs.size(), static_cast<size_t>(0)) << "Invalid number of tokens";
+
+ // Sanity check the input.
+ size_t result = 0;
+ for (const auto& range : pairs) {
+ CHECK_LT(range.first, range.second)
+ << "Empty or negative range: " << range.first << ", " << range.second;
+ size_t sz = range.second - range.first;
+ CHECK_LE(result, SIZE_MAX - sz) << "RangeSet size overflow";
+ result += sz;
+ }
+
+ ranges_ = pairs;
+ blocks_ = result;
+ }
+
+ static RangeSet Parse(const std::string& range_text) {
+ std::vector<std::string> pieces = android::base::Split(range_text, ",");
+ CHECK_GE(pieces.size(), static_cast<size_t>(3)) << "Invalid range text: " << range_text;
+
+ size_t num;
+ CHECK(android::base::ParseUint(pieces[0], &num, static_cast<size_t>(INT_MAX)))
+ << "Failed to parse the number of tokens: " << range_text;
+
+ CHECK_NE(num, static_cast<size_t>(0)) << "Invalid number of tokens: " << range_text;
+ CHECK_EQ(num % 2, static_cast<size_t>(0)) << "Number of tokens must be even: " << range_text;
+ CHECK_EQ(num, pieces.size() - 1) << "Mismatching number of tokens: " << range_text;
+
+ std::vector<Range> pairs;
+ for (size_t i = 0; i < num; i += 2) {
+ size_t first;
+ CHECK(android::base::ParseUint(pieces[i + 1], &first, static_cast<size_t>(INT_MAX)));
+ size_t second;
+ CHECK(android::base::ParseUint(pieces[i + 2], &second, static_cast<size_t>(INT_MAX)));
+
+ pairs.emplace_back(first, second);
+ }
+
+ return RangeSet(std::move(pairs));
+ }
+
+ std::string ToString() const {
+ if (ranges_.empty()) {
+ return "";
+ }
+ std::string result = std::to_string(ranges_.size() * 2);
+ for (const auto& r : ranges_) {
+ result += android::base::StringPrintf(",%zu,%zu", r.first, r.second);
+ }
+
+ return result;
+ }
+
+ // Get the block number for the i-th (starting from 0) block in the RangeSet.
+ size_t GetBlockNumber(size_t idx) const {
+ CHECK_LT(idx, blocks_) << "Out of bound index " << idx << " (total blocks: " << blocks_ << ")";
+
+ for (const auto& range : ranges_) {
+ if (idx < range.second - range.first) {
+ return range.first + idx;
+ }
+ idx -= (range.second - range.first);
+ }
+
+ CHECK(false) << "Failed to find block number for index " << idx;
+ return 0; // Unreachable, but to make compiler happy.
+ }
+
+  // RangeSet bounds are half-open: for example, "3,5" contains blocks 3 and 4, so "3,5" and "5,7"
+  // do not overlap.
+ bool Overlaps(const RangeSet& other) const {
+ for (const auto& range : ranges_) {
+ size_t start = range.first;
+ size_t end = range.second;
+ for (const auto& other_range : other.ranges_) {
+ size_t other_start = other_range.first;
+ size_t other_end = other_range.second;
+ // [start, end) vs [other_start, other_end)
+ if (!(other_start >= end || start >= other_end)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ // size() gives the number of Range's in this RangeSet.
+ size_t size() const {
+ return ranges_.size();
+ }
+
+ // blocks() gives the number of all blocks in this RangeSet.
+ size_t blocks() const {
+ return blocks_;
+ }
+
+ // We provide const iterators only.
+ std::vector<Range>::const_iterator cbegin() const {
+ return ranges_.cbegin();
+ }
+
+ std::vector<Range>::const_iterator cend() const {
+ return ranges_.cend();
+ }
+
+ // Need to provide begin()/end() since range-based loop expects begin()/end().
+ std::vector<Range>::const_iterator begin() const {
+ return ranges_.cbegin();
+ }
+
+ std::vector<Range>::const_iterator end() const {
+ return ranges_.cend();
+ }
+
+ // Reverse const iterators for MoveRange().
+ std::vector<Range>::const_reverse_iterator crbegin() const {
+ return ranges_.crbegin();
+ }
+
+ std::vector<Range>::const_reverse_iterator crend() const {
+ return ranges_.crend();
+ }
+
+ const Range& operator[](size_t i) const {
+ return ranges_[i];
+ }
+
+ bool operator==(const RangeSet& other) const {
+ // The orders of Range's matter. "4,1,5,8,10" != "4,8,10,1,5".
+ return (ranges_ == other.ranges_);
+ }
+
+ bool operator!=(const RangeSet& other) const {
+ return ranges_ != other.ranges_;
+ }
+
+ protected:
+ // Actual limit for each value and the total number are both INT_MAX.
+ std::vector<Range> ranges_;
+ size_t blocks_;
+};
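
A minimal sketch of the RangeSet API above, assuming the android-base CHECK macros this header already pulls in. "4,1,9,15,19" encodes the two ranges [1, 9) and [15, 19), i.e. 12 blocks in total.

RangeSet rs = RangeSet::Parse("4,1,9,15,19");
CHECK_EQ(rs.size(), static_cast<size_t>(2));               // two Range entries
CHECK_EQ(rs.blocks(), static_cast<size_t>(12));            // 8 + 4 blocks in total
CHECK_EQ(rs.GetBlockNumber(0), static_cast<size_t>(1));    // first block of [1, 9)
CHECK_EQ(rs.GetBlockNumber(8), static_cast<size_t>(15));   // first block of [15, 19)
CHECK(rs.Overlaps(RangeSet::Parse("2,8,10")));             // [8, 10) overlaps [1, 9)
CHECK(!rs.Overlaps(RangeSet::Parse("2,9,15")));            // [9, 15) falls in the gap
CHECK_EQ(rs.ToString(), "4,1,9,15,19");
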
+
+static constexpr size_t kBlockSize = 4096;
+
+// This class is a sorted version of a RangeSet, and it's useful in imgdiff to split the input
+// files when we're handling large zip files. Specifically, we can treat the input file as a
+// continuous RangeSet (i.e. RangeSet("0-99") for a 100-block file) and break it down into
+// several smaller chunks based on the zip entries.
+
+// For example, [source: 0-99] can be split into
+// [split_src1: 10-29]; [split_src2: 40-49, 60-69]; [split_src3: 70-89]
+// Here "10-29" simply means blocks 10 through 29 with respect to the original input file.
+// Also, note that the split sources should be mutually exclusive, but they don't need to cover
+// every block in the original source.
+class SortedRangeSet : public RangeSet {
+ public:
+ SortedRangeSet() {}
+
+  // Ranges in the set should be mutually exclusive, and they're sorted by the start block.
+ explicit SortedRangeSet(std::vector<Range>&& pairs) : RangeSet(std::move(pairs)) {
+ std::sort(ranges_.begin(), ranges_.end());
+ }
+
+ void Insert(const Range& to_insert) {
+ SortedRangeSet rs({ to_insert });
+ Insert(rs);
+ }
+
+ // Insert the input SortedRangeSet; keep the ranges sorted and merge the overlap ranges.
+ void Insert(const SortedRangeSet& rs) {
+ if (rs.size() == 0) {
+ return;
+ }
+ // Merge and sort the two RangeSets.
+ std::vector<Range> temp = std::move(ranges_);
+ std::copy(rs.begin(), rs.end(), std::back_inserter(temp));
+ std::sort(temp.begin(), temp.end());
+
+ Clear();
+ // Trim overlaps and insert the result back to ranges_.
+ Range to_insert = temp.front();
+ for (auto it = temp.cbegin() + 1; it != temp.cend(); it++) {
+ if (it->first <= to_insert.second) {
+ to_insert.second = std::max(to_insert.second, it->second);
+ } else {
+ ranges_.push_back(to_insert);
+ blocks_ += (to_insert.second - to_insert.first);
+ to_insert = *it;
+ }
+ }
+ ranges_.push_back(to_insert);
+ blocks_ += (to_insert.second - to_insert.first);
+ }
+
+ void Clear() {
+ blocks_ = 0;
+ ranges_.clear();
+ }
+
+ using RangeSet::Overlaps;
+ bool Overlaps(size_t start, size_t len) const {
+ RangeSet rs({ { start / kBlockSize, (start + len - 1) / kBlockSize + 1 } });
+ return Overlaps(rs);
+ }
+
+ // Compute the block range the file occupies, and insert that range.
+ void Insert(size_t start, size_t len) {
+ Range to_insert{ start / kBlockSize, (start + len - 1) / kBlockSize + 1 };
+ Insert(to_insert);
+ }
+
+ // Given an offset of the file, checks if the corresponding block (by considering the file as
+ // 0-based continuous block ranges) is covered by the SortedRangeSet. If so, returns the offset
+ // within this SortedRangeSet.
+ //
+  // For example, the 4106-th byte of a file is in block 1, assuming a block size of 4096 bytes.
+  // The mapped offset within a SortedRangeSet("1-9 15-19") is 10.
+  //
+  // An offset of 65546 falls into block 16 of the file. In SortedRangeSet("1-9 15-19"), nine
+  // blocks of the set (the eight blocks of [1-9) plus block 15) precede block 16, so its data can
+  // be found at offset 36874 (i.e. 4096 * 9 + 10) in a range represented by this SortedRangeSet.
+ size_t GetOffsetInRangeSet(size_t old_offset) const {
+ size_t old_block_start = old_offset / kBlockSize;
+ size_t new_block_start = 0;
+ for (const auto& range : ranges_) {
+ // Find the index of old_block_start.
+ if (old_block_start >= range.second) {
+ new_block_start += (range.second - range.first);
+ } else if (old_block_start >= range.first) {
+ new_block_start += (old_block_start - range.first);
+ return (new_block_start * kBlockSize + old_offset % kBlockSize);
+ } else {
+ CHECK(false) << "block_start " << old_block_start
+ << " is missing between two ranges: " << this->ToString();
+ return 0;
+ }
+ }
+ CHECK(false) << "block_start " << old_block_start
+ << " exceeds the limit of current RangeSet: " << this->ToString();
+ return 0;
+ }
+};
\ No newline at end of file
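
Tying the pieces together, here is a short sketch of SortedRangeSet that reuses the worked example from the GetOffsetInRangeSet() comment above (blocks [1, 9) and [15, 19), 4096-byte blocks); the CHECK macros come from android-base as before.

SortedRangeSet rs({ { 1, 9 }, { 15, 19 } });
CHECK_EQ(rs.blocks(), static_cast<size_t>(12));
CHECK_EQ(rs.GetOffsetInRangeSet(4106), static_cast<size_t>(10));      // block 1 is the first set block
CHECK_EQ(rs.GetOffsetInRangeSet(65546), static_cast<size_t>(36874));  // 9 set blocks precede block 16
rs.Insert(65536, 8192);                          // bytes [65536, 73728) cover blocks 16 and 17,
CHECK_EQ(rs.blocks(), static_cast<size_t>(12));  // which are already in the set
CHECK(rs.Overlaps(4096, 1));                     // byte 4096 falls in block 1
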
diff --git a/recovery.cpp b/recovery.cpp
index 07bd7b9..6f62ff1 100644
--- a/recovery.cpp
+++ b/recovery.cpp
@@ -179,19 +179,19 @@
* 7b. the user reboots (pulling the battery, etc) into the main system
*/
-// open a given path, mounting partitions as necessary
-FILE* fopen_path(const char *path, const char *mode) {
- if (ensure_path_mounted(path) != 0) {
- LOG(ERROR) << "Can't mount " << path;
- return NULL;
- }
+// Open a given path, mounting partitions as necessary.
+FILE* fopen_path(const char* path, const char* mode) {
+ if (ensure_path_mounted(path) != 0) {
+ LOG(ERROR) << "Can't mount " << path;
+ return nullptr;
+ }
- // When writing, try to create the containing directory, if necessary.
- // Use generous permissions, the system (init.rc) will reset them.
- if (strchr("wa", mode[0])) dirCreateHierarchy(path, 0777, NULL, 1, sehandle);
-
- FILE *fp = fopen(path, mode);
- return fp;
+ // When writing, try to create the containing directory, if necessary. Use generous permissions,
+ // the system (init.rc) will reset them.
+ if (strchr("wa", mode[0])) {
+ mkdir_recursively(path, 0777, true, sehandle);
+ }
+ return fopen(path, mode);
}
// close a file, log an error if the error indicator is set
@@ -594,7 +594,7 @@
if (is_cache) {
// Re-create the log dir and write back the log entries.
if (ensure_path_mounted(CACHE_LOG_DIR) == 0 &&
- dirCreateHierarchy(CACHE_LOG_DIR, 0777, nullptr, false, sehandle) == 0) {
+ mkdir_recursively(CACHE_LOG_DIR, 0777, false, sehandle) == 0) {
for (const auto& log : log_files) {
if (!android::base::WriteStringToFile(log.data, log.name, log.sb.st_mode, log.sb.st_uid,
log.sb.st_gid)) {
diff --git a/roots.cpp b/roots.cpp
index 29f55b9..fdcbfe8 100644
--- a/roots.cpp
+++ b/roots.cpp
@@ -16,162 +16,166 @@
#include "roots.h"
-#include <errno.h>
+#include <ctype.h>
+#include <fcntl.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
-#include <ctype.h>
-#include <fcntl.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <android-base/unique_fd.h>
+#include <cryptfs.h>
#include <ext4_utils/wipe.h>
#include <fs_mgr.h>
#include "common.h"
#include "mounts.h"
-#include "cryptfs.h"
-static struct fstab *fstab = NULL;
+static struct fstab* fstab = nullptr;
-extern struct selabel_handle *sehandle;
+extern struct selabel_handle* sehandle;
-void load_volume_table()
-{
- int i;
- int ret;
+void load_volume_table() {
+ fstab = fs_mgr_read_fstab_default();
+ if (!fstab) {
+ LOG(ERROR) << "Failed to read default fstab";
+ return;
+ }
- fstab = fs_mgr_read_fstab_default();
- if (!fstab) {
- LOG(ERROR) << "failed to read default fstab";
- return;
- }
+ int ret = fs_mgr_add_entry(fstab, "/tmp", "ramdisk", "ramdisk");
+ if (ret == -1) {
+ LOG(ERROR) << "Failed to add /tmp entry to fstab";
+ fs_mgr_free_fstab(fstab);
+ fstab = nullptr;
+ return;
+ }
- ret = fs_mgr_add_entry(fstab, "/tmp", "ramdisk", "ramdisk");
- if (ret < 0 ) {
- LOG(ERROR) << "failed to add /tmp entry to fstab";
- fs_mgr_free_fstab(fstab);
- fstab = NULL;
- return;
- }
-
- printf("recovery filesystem table\n");
- printf("=========================\n");
- for (i = 0; i < fstab->num_entries; ++i) {
- Volume* v = &fstab->recs[i];
- printf(" %d %s %s %s %lld\n", i, v->mount_point, v->fs_type,
- v->blk_device, v->length);
- }
- printf("\n");
+ printf("recovery filesystem table\n");
+ printf("=========================\n");
+ for (int i = 0; i < fstab->num_entries; ++i) {
+ const Volume* v = &fstab->recs[i];
+ printf(" %d %s %s %s %lld\n", i, v->mount_point, v->fs_type, v->blk_device, v->length);
+ }
+ printf("\n");
}
Volume* volume_for_path(const char* path) {
- return fs_mgr_get_entry_for_mount_point(fstab, path);
+ return fs_mgr_get_entry_for_mount_point(fstab, path);
}
// Mount the volume specified by path at the given mount_point.
int ensure_path_mounted_at(const char* path, const char* mount_point) {
- Volume* v = volume_for_path(path);
- if (v == NULL) {
- LOG(ERROR) << "unknown volume for path [" << path << "]";
- return -1;
- }
- if (strcmp(v->fs_type, "ramdisk") == 0) {
- // the ramdisk is always mounted.
- return 0;
- }
-
- if (!scan_mounted_volumes()) {
- LOG(ERROR) << "failed to scan mounted volumes";
- return -1;
- }
-
- if (!mount_point) {
- mount_point = v->mount_point;
- }
-
- MountedVolume* mv = find_mounted_volume_by_mount_point(mount_point);
- if (mv) {
- // volume is already mounted
- return 0;
- }
-
- mkdir(mount_point, 0755); // in case it doesn't already exist
-
- if (strcmp(v->fs_type, "ext4") == 0 ||
- strcmp(v->fs_type, "squashfs") == 0 ||
- strcmp(v->fs_type, "vfat") == 0) {
- int result = mount(v->blk_device, mount_point, v->fs_type, v->flags, v->fs_options);
- if (result == -1 && fs_mgr_is_formattable(v)) {
- LOG(ERROR) << "failed to mount " << mount_point << " (" << strerror(errno)
- << ") , formatting.....";
- bool crypt_footer = fs_mgr_is_encryptable(v) && !strcmp(v->key_loc, "footer");
- if (fs_mgr_do_format(v, crypt_footer) == 0) {
- result = mount(v->blk_device, mount_point, v->fs_type, v->flags, v->fs_options);
- } else {
- PLOG(ERROR) << "failed to format " << mount_point;
- return -1;
- }
- }
-
- if (result == -1) {
- PLOG(ERROR) << "failed to mount " << mount_point;
- return -1;
- }
- return 0;
- }
-
- LOG(ERROR) << "unknown fs_type \"" << v->fs_type << "\" for " << mount_point;
+ Volume* v = volume_for_path(path);
+ if (v == nullptr) {
+ LOG(ERROR) << "unknown volume for path [" << path << "]";
return -1;
+ }
+ if (strcmp(v->fs_type, "ramdisk") == 0) {
+ // The ramdisk is always mounted.
+ return 0;
+ }
+
+ if (!scan_mounted_volumes()) {
+ LOG(ERROR) << "Failed to scan mounted volumes";
+ return -1;
+ }
+
+ if (!mount_point) {
+ mount_point = v->mount_point;
+ }
+
+ const MountedVolume* mv = find_mounted_volume_by_mount_point(mount_point);
+ if (mv != nullptr) {
+ // Volume is already mounted.
+ return 0;
+ }
+
+ mkdir(mount_point, 0755); // in case it doesn't already exist
+
+ if (strcmp(v->fs_type, "ext4") == 0 || strcmp(v->fs_type, "squashfs") == 0 ||
+ strcmp(v->fs_type, "vfat") == 0) {
+ int result = mount(v->blk_device, mount_point, v->fs_type, v->flags, v->fs_options);
+ if (result == -1 && fs_mgr_is_formattable(v)) {
+ PLOG(ERROR) << "Failed to mount " << mount_point << "; formatting";
+ bool crypt_footer = fs_mgr_is_encryptable(v) && !strcmp(v->key_loc, "footer");
+ if (fs_mgr_do_format(v, crypt_footer) == 0) {
+ result = mount(v->blk_device, mount_point, v->fs_type, v->flags, v->fs_options);
+ } else {
+ PLOG(ERROR) << "Failed to format " << mount_point;
+ return -1;
+ }
+ }
+
+ if (result == -1) {
+ PLOG(ERROR) << "Failed to mount " << mount_point;
+ return -1;
+ }
+ return 0;
+ }
+
+ LOG(ERROR) << "unknown fs_type \"" << v->fs_type << "\" for " << mount_point;
+ return -1;
}
int ensure_path_mounted(const char* path) {
- // Mount at the default mount point.
- return ensure_path_mounted_at(path, nullptr);
+ // Mount at the default mount point.
+ return ensure_path_mounted_at(path, nullptr);
}
int ensure_path_unmounted(const char* path) {
- Volume* v = volume_for_path(path);
- if (v == NULL) {
- LOG(ERROR) << "unknown volume for path [" << path << "]";
- return -1;
- }
- if (strcmp(v->fs_type, "ramdisk") == 0) {
- // the ramdisk is always mounted; you can't unmount it.
- return -1;
- }
+ const Volume* v = volume_for_path(path);
+ if (v == nullptr) {
+ LOG(ERROR) << "unknown volume for path [" << path << "]";
+ return -1;
+ }
+ if (strcmp(v->fs_type, "ramdisk") == 0) {
+ // The ramdisk is always mounted; you can't unmount it.
+ return -1;
+ }
- if (!scan_mounted_volumes()) {
- LOG(ERROR) << "failed to scan mounted volumes";
- return -1;
- }
+ if (!scan_mounted_volumes()) {
+ LOG(ERROR) << "Failed to scan mounted volumes";
+ return -1;
+ }
- MountedVolume* mv = find_mounted_volume_by_mount_point(v->mount_point);
- if (mv == NULL) {
- // volume is already unmounted
- return 0;
- }
+ MountedVolume* mv = find_mounted_volume_by_mount_point(v->mount_point);
+ if (mv == nullptr) {
+ // Volume is already unmounted.
+ return 0;
+ }
- return unmount_mounted_volume(mv);
+ return unmount_mounted_volume(mv);
}
-static int exec_cmd(const char* path, char* const argv[]) {
- int status;
- pid_t child;
- if ((child = vfork()) == 0) {
- execv(path, argv);
- _exit(EXIT_FAILURE);
- }
- waitpid(child, &status, 0);
- if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- LOG(ERROR) << path << " failed with status " << WEXITSTATUS(status);
- }
- return WEXITSTATUS(status);
+static int exec_cmd(const std::vector<std::string>& args) {
+ CHECK_NE(static_cast<size_t>(0), args.size());
+
+ std::vector<char*> argv(args.size());
+ std::transform(args.cbegin(), args.cend(), argv.begin(),
+ [](const std::string& arg) { return const_cast<char*>(arg.c_str()); });
+ argv.push_back(nullptr);
+
+ pid_t child;
+ if ((child = vfork()) == 0) {
+ execv(argv[0], argv.data());
+ _exit(EXIT_FAILURE);
+ }
+
+ int status;
+ waitpid(child, &status, 0);
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ LOG(ERROR) << args[0] << " failed with status " << WEXITSTATUS(status);
+ }
+ return WEXITSTATUS(status);
}
static ssize_t get_file_size(int fd, uint64_t reserve_len) {
@@ -192,136 +196,116 @@
}
int format_volume(const char* volume, const char* directory) {
- Volume* v = volume_for_path(volume);
- if (v == NULL) {
- LOG(ERROR) << "unknown volume \"" << volume << "\"";
- return -1;
- }
- if (strcmp(v->fs_type, "ramdisk") == 0) {
- // you can't format the ramdisk.
- LOG(ERROR) << "can't format_volume \"" << volume << "\"";
- return -1;
- }
- if (strcmp(v->mount_point, volume) != 0) {
- LOG(ERROR) << "can't give path \"" << volume << "\" to format_volume";
- return -1;
- }
-
- if (ensure_path_unmounted(volume) != 0) {
- LOG(ERROR) << "format_volume failed to unmount \"" << v->mount_point << "\"";
- return -1;
- }
-
- if (strcmp(v->fs_type, "ext4") == 0 || strcmp(v->fs_type, "f2fs") == 0) {
- // if there's a key_loc that looks like a path, it should be a
- // block device for storing encryption metadata. wipe it too.
- if (v->key_loc != NULL && v->key_loc[0] == '/') {
- LOG(INFO) << "wiping " << v->key_loc;
- int fd = open(v->key_loc, O_WRONLY | O_CREAT, 0644);
- if (fd < 0) {
- LOG(ERROR) << "format_volume: failed to open " << v->key_loc;
- return -1;
- }
- wipe_block_device(fd, get_file_size(fd));
- close(fd);
- }
-
- ssize_t length = 0;
- if (v->length != 0) {
- length = v->length;
- } else if (v->key_loc != NULL && strcmp(v->key_loc, "footer") == 0) {
- android::base::unique_fd fd(open(v->blk_device, O_RDONLY));
- if (fd < 0) {
- PLOG(ERROR) << "get_file_size: failed to open " << v->blk_device;
- return -1;
- }
- length = get_file_size(fd.get(), CRYPT_FOOTER_OFFSET);
- if (length <= 0) {
- LOG(ERROR) << "get_file_size: invalid size " << length << " for " << v->blk_device;
- return -1;
- }
- }
- int result;
- if (strcmp(v->fs_type, "ext4") == 0) {
- static constexpr int block_size = 4096;
- int raid_stride = v->logical_blk_size / block_size;
- int raid_stripe_width = v->erase_blk_size / block_size;
-
- // stride should be the max of 8kb and logical block size
- if (v->logical_blk_size != 0 && v->logical_blk_size < 8192) {
- raid_stride = 8192 / block_size;
- }
-
- const char* mke2fs_argv[] = { "/sbin/mke2fs_static",
- "-F",
- "-t",
- "ext4",
- "-b",
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr };
-
- int i = 5;
- std::string block_size_str = std::to_string(block_size);
- mke2fs_argv[i++] = block_size_str.c_str();
-
- std::string ext_args;
- if (v->erase_blk_size != 0 && v->logical_blk_size != 0) {
- ext_args = android::base::StringPrintf("stride=%d,stripe-width=%d", raid_stride,
- raid_stripe_width);
- mke2fs_argv[i++] = "-E";
- mke2fs_argv[i++] = ext_args.c_str();
- }
-
- mke2fs_argv[i++] = v->blk_device;
-
- std::string size_str = std::to_string(length / block_size);
- if (length != 0) {
- mke2fs_argv[i++] = size_str.c_str();
- }
-
- result = exec_cmd(mke2fs_argv[0], const_cast<char**>(mke2fs_argv));
- if (result == 0 && directory != nullptr) {
- const char* e2fsdroid_argv[] = { "/sbin/e2fsdroid_static",
- "-e",
- "-f",
- directory,
- "-a",
- volume,
- v->blk_device,
- nullptr };
-
- result = exec_cmd(e2fsdroid_argv[0], const_cast<char**>(e2fsdroid_argv));
- }
- } else { /* Has to be f2fs because we checked earlier. */
- char *num_sectors = nullptr;
- if (length >= 512 && asprintf(&num_sectors, "%zd", length / 512) <= 0) {
- LOG(ERROR) << "format_volume: failed to create " << v->fs_type
- << " command for " << v->blk_device;
- return -1;
- }
- const char *f2fs_path = "/sbin/mkfs.f2fs";
- const char* const f2fs_argv[] = {"mkfs.f2fs", "-t", "-d1", v->blk_device, num_sectors, nullptr};
-
- result = exec_cmd(f2fs_path, (char* const*)f2fs_argv);
- free(num_sectors);
- }
- if (result != 0) {
- PLOG(ERROR) << "format_volume: make " << v->fs_type << " failed on " << v->blk_device;
- return -1;
- }
- return 0;
- }
-
+ const Volume* v = volume_for_path(volume);
+ if (v == nullptr) {
+ LOG(ERROR) << "unknown volume \"" << volume << "\"";
+ return -1;
+ }
+ if (strcmp(v->fs_type, "ramdisk") == 0) {
+ LOG(ERROR) << "can't format_volume \"" << volume << "\"";
+ return -1;
+ }
+ if (strcmp(v->mount_point, volume) != 0) {
+ LOG(ERROR) << "can't give path \"" << volume << "\" to format_volume";
+ return -1;
+ }
+ if (ensure_path_unmounted(volume) != 0) {
+ LOG(ERROR) << "format_volume: Failed to unmount \"" << v->mount_point << "\"";
+ return -1;
+ }
+ if (strcmp(v->fs_type, "ext4") != 0 && strcmp(v->fs_type, "f2fs") != 0) {
LOG(ERROR) << "format_volume: fs_type \"" << v->fs_type << "\" unsupported";
return -1;
+ }
+
+ // If there's a key_loc that looks like a path, it should be a block device for storing encryption
+ // metadata. Wipe it too.
+ if (v->key_loc != nullptr && v->key_loc[0] == '/') {
+ LOG(INFO) << "Wiping " << v->key_loc;
+ int fd = open(v->key_loc, O_WRONLY | O_CREAT, 0644);
+ if (fd == -1) {
+ PLOG(ERROR) << "format_volume: Failed to open " << v->key_loc;
+ return -1;
+ }
+ wipe_block_device(fd, get_file_size(fd));
+ close(fd);
+ }
+
+ ssize_t length = 0;
+ if (v->length != 0) {
+ length = v->length;
+ } else if (v->key_loc != nullptr && strcmp(v->key_loc, "footer") == 0) {
+ android::base::unique_fd fd(open(v->blk_device, O_RDONLY));
+ if (fd == -1) {
+ PLOG(ERROR) << "get_file_size: failed to open " << v->blk_device;
+ return -1;
+ }
+ length = get_file_size(fd.get(), CRYPT_FOOTER_OFFSET);
+ if (length <= 0) {
+ LOG(ERROR) << "get_file_size: invalid size " << length << " for " << v->blk_device;
+ return -1;
+ }
+ }
+
+ if (strcmp(v->fs_type, "ext4") == 0) {
+ static constexpr int kBlockSize = 4096;
+ std::vector<std::string> mke2fs_args = {
+ "/sbin/mke2fs_static", "-F", "-t", "ext4", "-b", std::to_string(kBlockSize),
+ };
+
+ int raid_stride = v->logical_blk_size / kBlockSize;
+ int raid_stripe_width = v->erase_blk_size / kBlockSize;
+ // stride should be the max of 8KB and logical block size
+ if (v->logical_blk_size != 0 && v->logical_blk_size < 8192) {
+ raid_stride = 8192 / kBlockSize;
+ }
+ if (v->erase_blk_size != 0 && v->logical_blk_size != 0) {
+ mke2fs_args.push_back("-E");
+ mke2fs_args.push_back(
+ android::base::StringPrintf("stride=%d,stripe-width=%d", raid_stride, raid_stripe_width));
+ }
+ mke2fs_args.push_back(v->blk_device);
+ if (length != 0) {
+ mke2fs_args.push_back(std::to_string(length / kBlockSize));
+ }
+
+ int result = exec_cmd(mke2fs_args);
+ if (result == 0 && directory != nullptr) {
+ std::vector<std::string> e2fsdroid_args = {
+ "/sbin/e2fsdroid_static",
+ "-e",
+ "-f",
+ directory,
+ "-a",
+ volume,
+ v->blk_device,
+ };
+ result = exec_cmd(e2fsdroid_args);
+ }
+
+ if (result != 0) {
+ PLOG(ERROR) << "format_volume: Failed to make ext4 on " << v->blk_device;
+ return -1;
+ }
+ return 0;
+ }
+
+ // Has to be f2fs because we checked earlier.
+ std::vector<std::string> f2fs_args = { "/sbin/mkfs.f2fs", "-t", "-d1", v->blk_device };
+ if (length >= 512) {
+ f2fs_args.push_back(std::to_string(length / 512));
+ }
+
+ int result = exec_cmd(f2fs_args);
+ if (result != 0) {
+ PLOG(ERROR) << "format_volume: Failed to make f2fs on " << v->blk_device;
+ return -1;
+ }
+ return 0;
}
int format_volume(const char* volume) {
- return format_volume(volume, NULL);
+ return format_volume(volume, nullptr);
}
int setup_install_mounts() {
@@ -339,12 +323,12 @@
if (strcmp(v->mount_point, "/tmp") == 0 || strcmp(v->mount_point, "/cache") == 0) {
if (ensure_path_mounted(v->mount_point) != 0) {
- LOG(ERROR) << "failed to mount " << v->mount_point;
+ LOG(ERROR) << "Failed to mount " << v->mount_point;
return -1;
}
} else {
if (ensure_path_unmounted(v->mount_point) != 0) {
- LOG(ERROR) << "failed to unmount " << v->mount_point;
+ LOG(ERROR) << "Failed to unmount " << v->mount_point;
return -1;
}
}
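
For context, here is a minimal standalone sketch of the std::vector<std::string>-based command execution introduced in roots.cpp above, together with how a call site composes optional arguments. It is illustrative only (the helper name is hypothetical), not part of the patch:

    // Assumes args is non-empty, as the real exec_cmd() CHECKs.
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #include <algorithm>
    #include <string>
    #include <vector>

    static int ExecCmdSketch(const std::vector<std::string>& args) {
      // Build a null-terminated argv whose pointers refer into the caller's strings;
      // they stay alive until the child has been exec'ed and waited for.
      std::vector<char*> argv(args.size());
      std::transform(args.cbegin(), args.cend(), argv.begin(),
                     [](const std::string& arg) { return const_cast<char*>(arg.c_str()); });
      argv.push_back(nullptr);

      pid_t child = vfork();
      if (child == 0) {
        execv(argv[0], argv.data());
        _exit(EXIT_FAILURE);
      }

      int status;
      waitpid(child, &status, 0);
      return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
    }

    // A call site can append optional flags without tracking array slots by hand:
    //   std::vector<std::string> args{ "/sbin/mke2fs_static", "-F", "-t", "ext4", "-b", "4096" };
    //   if (length != 0) args.push_back(std::to_string(length / 4096));
    //   int rc = ExecCmdSketch(args);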
diff --git a/tests/Android.mk b/tests/Android.mk
index f2497b8..748d9c8 100644
--- a/tests/Android.mk
+++ b/tests/Android.mk
@@ -18,7 +18,7 @@
# Unit tests
include $(CLEAR_VARS)
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_MODULE := recovery_unit_test
LOCAL_COMPATIBILITY_SUITE := device-tests
LOCAL_STATIC_LIBRARIES := \
@@ -46,7 +46,7 @@
# Manual tests
include $(CLEAR_VARS)
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_MODULE := recovery_manual_test
LOCAL_STATIC_LIBRARIES := \
libminui \
@@ -81,6 +81,7 @@
# Component tests
include $(CLEAR_VARS)
LOCAL_CFLAGS := \
+ -Wall \
-Werror \
-D_FILE_OFFSET_BITS=64
@@ -191,7 +192,7 @@
# Host tests
include $(CLEAR_VARS)
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_MODULE := recovery_host_test
LOCAL_MODULE_HOST_OS := linux
LOCAL_C_INCLUDES := bootable/recovery
diff --git a/tests/component/applypatch_test.cpp b/tests/component/applypatch_test.cpp
index 016fed9..4254289 100644
--- a/tests/component/applypatch_test.cpp
+++ b/tests/component/applypatch_test.cpp
@@ -61,14 +61,6 @@
ASSERT_TRUE(android::base::WriteStringToFile(content, fname));
}
-static bool file_cmp(const std::string& f1, const std::string& f2) {
- std::string c1;
- android::base::ReadFileToString(f1, &c1);
- std::string c2;
- android::base::ReadFileToString(f2, &c2);
- return c1 == c2;
-}
-
class ApplyPatchTest : public ::testing::Test {
public:
static void SetUpTestCase() {
diff --git a/tests/component/imgdiff_test.cpp b/tests/component/imgdiff_test.cpp
index bf25aeb..3163a57 100644
--- a/tests/component/imgdiff_test.cpp
+++ b/tests/component/imgdiff_test.cpp
@@ -16,13 +16,17 @@
#include <stdio.h>
+#include <algorithm>
#include <string>
+#include <tuple>
#include <vector>
#include <android-base/file.h>
#include <android-base/memory.h>
+#include <android-base/stringprintf.h>
#include <android-base/test_utils.h>
#include <applypatch/imgdiff.h>
+#include <applypatch/imgdiff_image.h>
#include <applypatch/imgpatch.h>
#include <gtest/gtest.h>
#include <ziparchive/zip_writer.h>
@@ -75,15 +79,20 @@
if (num_deflate != nullptr) *num_deflate = deflate;
}
+static void GenerateTarget(const std::string& src, const std::string& patch, std::string* patched) {
+ patched->clear();
+ ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
+ reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
+ [&](const unsigned char* data, size_t len) {
+ patched->append(reinterpret_cast<const char*>(data), len);
+ return len;
+ }));
+}
+
static void verify_patched_image(const std::string& src, const std::string& patch,
const std::string& tgt) {
std::string patched;
- ASSERT_EQ(0, ApplyImagePatch(reinterpret_cast<const unsigned char*>(src.data()), src.size(),
- reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
- [&patched](const unsigned char* data, size_t len) {
- patched.append(reinterpret_cast<const char*>(data), len);
- return len;
- }));
+ GenerateTarget(src, patch, &patched);
ASSERT_EQ(tgt, patched);
}
@@ -623,3 +632,323 @@
reinterpret_cast<const unsigned char*>(patch.data()), patch.size(),
[](const unsigned char* /*data*/, size_t len) { return len; }));
}
+
+static void construct_store_entry(const std::vector<std::tuple<std::string, size_t, char>>& info,
+ ZipWriter* writer) {
+ for (auto& t : info) {
+ // Create t(1) blocks filled with the character t(2), and write the data to the entry named t(0).
+ ASSERT_EQ(0, writer->StartEntry(std::get<0>(t).c_str(), 0));
+ const std::string content(std::get<1>(t) * 4096, std::get<2>(t));
+ ASSERT_EQ(0, writer->WriteBytes(content.data(), content.size()));
+ ASSERT_EQ(0, writer->FinishEntry());
+ }
+}
+
+static void construct_deflate_entry(const std::vector<std::tuple<std::string, size_t, size_t>>& info,
+ ZipWriter* writer, const std::string& data) {
+ for (auto& t : info) {
+ // t(0): entry_name; t(1): block offset; t(2): length in blocks.
+ ASSERT_EQ(0, writer->StartEntry(std::get<0>(t).c_str(), ZipWriter::kCompress));
+ ASSERT_EQ(0, writer->WriteBytes(data.data() + std::get<1>(t) * 4096, std::get<2>(t) * 4096));
+ ASSERT_EQ(0, writer->FinishEntry());
+ }
+}
+
+// Look for the generated source and patch pieces in the debug_dir and generate the target for
+// each pair. Concatenate the split targets and match against the original one.
+static void GenerateAndCheckSplitTarget(const std::string& debug_dir, size_t count,
+ const std::string& tgt) {
+ std::string patched;
+ for (size_t i = 0; i < count; i++) {
+ std::string split_src_path = android::base::StringPrintf("%s/src-%zu", debug_dir.c_str(), i);
+ std::string split_patch_path = android::base::StringPrintf("%s/patch-%zu", debug_dir.c_str(), i);
+
+ std::string split_src;
+ std::string split_patch;
+ ASSERT_TRUE(android::base::ReadFileToString(split_src_path, &split_src));
+ ASSERT_TRUE(android::base::ReadFileToString(split_patch_path, &split_patch));
+
+ std::string split_tgt;
+ GenerateTarget(split_src, split_patch, &split_tgt);
+ patched += split_tgt;
+ }
+
+ // Verify we can get back the original target image.
+ ASSERT_EQ(tgt, patched);
+}
+
+std::vector<ImageChunk> ConstructImageChunks(
+ const std::vector<uint8_t>& content, const std::vector<std::tuple<std::string, size_t>>& info) {
+ std::vector<ImageChunk> chunks;
+ size_t start = 0;
+ for (const auto& t : info) {
+ size_t length = std::get<1>(t);
+ chunks.emplace_back(CHUNK_NORMAL, start, &content, length, std::get<0>(t));
+ start += length;
+ }
+
+ return chunks;
+}
+
+TEST(ImgdiffTest, zip_mode_split_image_smoke) {
+ std::vector<uint8_t> content;
+ content.reserve(4096 * 50);
+ uint8_t n = 0;
+ generate_n(back_inserter(content), 4096 * 50, [&n]() { return n++ / 4096; });
+
+ ZipModeImage tgt_image(false, 4096 * 10);
+ std::vector<ImageChunk> tgt_chunks = ConstructImageChunks(content, { { "a", 100 },
+ { "b", 4096 * 2 },
+ { "c", 4096 * 3 },
+ { "d", 300 },
+ { "e-0", 4096 * 10 },
+ { "e-1", 4096 * 5 },
+ { "CD", 200 } });
+ tgt_image.Initialize(std::move(tgt_chunks),
+ std::vector<uint8_t>(content.begin(), content.begin() + 82520));
+
+ tgt_image.DumpChunks();
+
+ ZipModeImage src_image(true, 4096 * 10);
+ std::vector<ImageChunk> src_chunks = ConstructImageChunks(content, { { "b", 4096 * 3 },
+ { "c-0", 4096 * 10 },
+ { "c-1", 4096 * 2 },
+ { "a", 4096 * 5 },
+ { "e-0", 4096 * 10 },
+ { "e-1", 10000 },
+ { "CD", 5000 } });
+ src_image.Initialize(std::move(src_chunks),
+ std::vector<uint8_t>(content.begin(), content.begin() + 137880));
+
+ std::vector<ZipModeImage> split_tgt_images;
+ std::vector<ZipModeImage> split_src_images;
+ std::vector<SortedRangeSet> split_src_ranges;
+
+ ZipModeImage::SplitZipModeImageWithLimit(tgt_image, src_image, &split_tgt_images,
+ &split_src_images, &split_src_ranges);
+
+ // src_piece 1: a 5 blocks, b 3 blocks
+ // src_piece 2: c-0 10 blocks
+ // src_piece 3: d 0 block, e-0 10 blocks
+ // src_piece 4: e-1 2 blocks; CD 2 blocks
+ ASSERT_EQ(split_tgt_images.size(), split_src_images.size());
+ ASSERT_EQ(static_cast<size_t>(4), split_tgt_images.size());
+
+ ASSERT_EQ(static_cast<size_t>(1), split_tgt_images[0].NumOfChunks());
+ ASSERT_EQ(static_cast<size_t>(12288), split_tgt_images[0][0].DataLengthForPatch());
+ ASSERT_EQ("4,0,3,15,20", split_src_ranges[0].ToString());
+
+ ASSERT_EQ(static_cast<size_t>(1), split_tgt_images[1].NumOfChunks());
+ ASSERT_EQ(static_cast<size_t>(12288), split_tgt_images[1][0].DataLengthForPatch());
+ ASSERT_EQ("2,3,13", split_src_ranges[1].ToString());
+
+ ASSERT_EQ(static_cast<size_t>(1), split_tgt_images[2].NumOfChunks());
+ ASSERT_EQ(static_cast<size_t>(40960), split_tgt_images[2][0].DataLengthForPatch());
+ ASSERT_EQ("2,20,30", split_src_ranges[2].ToString());
+
+ ASSERT_EQ(static_cast<size_t>(1), split_tgt_images[3].NumOfChunks());
+ ASSERT_EQ(static_cast<size_t>(16984), split_tgt_images[3][0].DataLengthForPatch());
+ ASSERT_EQ("2,30,34", split_src_ranges[3].ToString());
+}
+
+TEST(ImgdiffTest, zip_mode_store_large_apk) {
+ // Construct src and tgt zip files with limit = 10 blocks.
+ // src tgt
+ // 12 blocks 'd' 3 blocks 'a'
+ // 8 blocks 'c' 3 blocks 'b'
+ // 3 blocks 'b' 8 blocks 'c' (exceeds limit)
+ // 3 blocks 'a' 12 blocks 'd' (exceeds limit)
+ // 3 blocks 'e'
+ TemporaryFile tgt_file;
+ FILE* tgt_file_ptr = fdopen(tgt_file.fd, "wb");
+ ZipWriter tgt_writer(tgt_file_ptr);
+ construct_store_entry(
+ { { "a", 3, 'a' }, { "b", 3, 'b' }, { "c", 8, 'c' }, { "d", 12, 'd' }, { "e", 3, 'e' } },
+ &tgt_writer);
+ ASSERT_EQ(0, tgt_writer.Finish());
+ ASSERT_EQ(0, fclose(tgt_file_ptr));
+
+ TemporaryFile src_file;
+ FILE* src_file_ptr = fdopen(src_file.fd, "wb");
+ ZipWriter src_writer(src_file_ptr);
+ construct_store_entry({ { "d", 12, 'd' }, { "c", 8, 'c' }, { "b", 3, 'b' }, { "a", 3, 'a' } },
+ &src_writer);
+ ASSERT_EQ(0, src_writer.Finish());
+ ASSERT_EQ(0, fclose(src_file_ptr));
+
+ // Compute patch.
+ TemporaryFile patch_file;
+ TemporaryDir debug_dir;
+ std::vector<const char*> args = {
+ "imgdiff", "-z", "--block-limit=10", android::base::StringPrintf(
+ "--debug-dir=%s", debug_dir.path).c_str(), src_file.path, tgt_file.path, patch_file.path,
+ };
+ ASSERT_EQ(0, imgdiff(args.size(), args.data()));
+
+ std::string tgt;
+ ASSERT_TRUE(android::base::ReadFileToString(tgt_file.path, &tgt));
+
+ // Expect 4 pieces of patch. (Roughly 3 'a' + 3 'b'; 8 'c'; 10 'd'; 2 'd' + 3 'e'.)
+ GenerateAndCheckSplitTarget(debug_dir.path, 4, tgt);
+}
+
+TEST(ImgdiffTest, zip_mode_deflate_large_apk) {
+ // Generate 50 blocks of random data.
+ std::string random_data;
+ random_data.reserve(4096 * 50);
+ generate_n(back_inserter(random_data), 4096 * 50, []() { return rand() % 256; });
+
+ // Construct src and tgt zip files with limit = 10 blocks.
+ // src tgt
+ // 22 blocks, "d" 4 blocks, "a"
+ // 5 blocks, "b" 4 blocks, "b"
+ // 3 blocks, "a" 7 blocks, "c" (exceeds limit)
+ // 8 blocks, "c" 20 blocks, "d" (exceeds limit)
+ // 1 block, "f" 2 blocks, "e"
+ TemporaryFile tgt_file;
+ FILE* tgt_file_ptr = fdopen(tgt_file.fd, "wb");
+ ZipWriter tgt_writer(tgt_file_ptr);
+
+ construct_deflate_entry(
+ { { "a", 0, 4 }, { "b", 5, 4 }, { "c", 12, 8 }, { "d", 21, 20 }, { "e", 45, 2 },
+ { "f", 48, 1 } }, &tgt_writer, random_data);
+
+ ASSERT_EQ(0, tgt_writer.Finish());
+ ASSERT_EQ(0, fclose(tgt_file_ptr));
+
+ TemporaryFile src_file;
+ FILE* src_file_ptr = fdopen(src_file.fd, "wb");
+ ZipWriter src_writer(src_file_ptr);
+
+ construct_deflate_entry(
+ { { "d", 21, 22 }, { "b", 5, 5 }, { "a", 0, 3 }, { "g", 9, 1 }, { "c", 11, 8 },
+ { "f", 45, 1 } }, &src_writer, random_data);
+
+ ASSERT_EQ(0, src_writer.Finish());
+ ASSERT_EQ(0, fclose(src_file_ptr));
+
+ ZipModeImage src_image(true, 10 * 4096);
+ ZipModeImage tgt_image(false, 10 * 4096);
+ ASSERT_TRUE(src_image.Initialize(src_file.path));
+ ASSERT_TRUE(tgt_image.Initialize(tgt_file.path));
+ ASSERT_TRUE(ZipModeImage::CheckAndProcessChunks(&tgt_image, &src_image));
+
+ src_image.DumpChunks();
+ tgt_image.DumpChunks();
+
+ std::vector<ZipModeImage> split_tgt_images;
+ std::vector<ZipModeImage> split_src_images;
+ std::vector<SortedRangeSet> split_src_ranges;
+ ZipModeImage::SplitZipModeImageWithLimit(tgt_image, src_image, &split_tgt_images,
+ &split_src_images, &split_src_ranges);
+
+ // src_piece 1: a 3 blocks, b 5 blocks
+ // src_piece 2: c 8 blocks
+ // src_piece 3: d-0 10 blocks
+ // src_piece 4: d-1 10 blocks
+ // src_piece 5: e 1 block, CD
+ ASSERT_EQ(split_tgt_images.size(), split_src_images.size());
+ ASSERT_EQ(static_cast<size_t>(5), split_tgt_images.size());
+
+ ASSERT_EQ(static_cast<size_t>(2), split_src_images[0].NumOfChunks());
+ ASSERT_EQ("a", split_src_images[0][0].GetEntryName());
+ ASSERT_EQ("b", split_src_images[0][1].GetEntryName());
+
+ ASSERT_EQ(static_cast<size_t>(1), split_src_images[1].NumOfChunks());
+ ASSERT_EQ("c", split_src_images[1][0].GetEntryName());
+
+ ASSERT_EQ(static_cast<size_t>(0), split_src_images[2].NumOfChunks());
+ ASSERT_EQ(static_cast<size_t>(0), split_src_images[3].NumOfChunks());
+ ASSERT_EQ(static_cast<size_t>(0), split_src_images[4].NumOfChunks());
+
+ // Compute patch.
+ TemporaryFile patch_file;
+ TemporaryDir debug_dir;
+ ASSERT_TRUE(ZipModeImage::GeneratePatches(split_tgt_images, split_src_images, split_src_ranges,
+ patch_file.path, debug_dir.path));
+
+ std::string tgt;
+ ASSERT_TRUE(android::base::ReadFileToString(tgt_file.path, &tgt));
+
+ // Expect 5 pieces of patch. ["a","b"; "c"; "d-0"; "d-1"; "e"]
+ GenerateAndCheckSplitTarget(debug_dir.path, 5, tgt);
+}
+
+TEST(ImgdiffTest, zip_mode_no_match_source) {
+ // Generate 20 blocks of random data.
+ std::string random_data;
+ random_data.reserve(4096 * 20);
+ generate_n(back_inserter(random_data), 4096 * 20, []() { return rand() % 256; });
+
+ TemporaryFile tgt_file;
+ FILE* tgt_file_ptr = fdopen(tgt_file.fd, "wb");
+ ZipWriter tgt_writer(tgt_file_ptr);
+
+ construct_deflate_entry({ { "a", 0, 4 }, { "b", 5, 5 }, { "c", 11, 5 } }, &tgt_writer,
+ random_data);
+
+ ASSERT_EQ(0, tgt_writer.Finish());
+ ASSERT_EQ(0, fclose(tgt_file_ptr));
+
+ // We don't have a matching source entry.
+ TemporaryFile src_file;
+ FILE* src_file_ptr = fdopen(src_file.fd, "wb");
+ ZipWriter src_writer(src_file_ptr);
+ construct_store_entry({ { "d", 1, 'd' } }, &src_writer);
+ ASSERT_EQ(0, src_writer.Finish());
+ ASSERT_EQ(0, fclose(src_file_ptr));
+
+ // Compute patch.
+ TemporaryFile patch_file;
+ TemporaryDir debug_dir;
+ std::vector<const char*> args = {
+ "imgdiff", "-z", "--block-limit=10", android::base::StringPrintf(
+ "--debug-dir=%s", debug_dir.path).c_str(), src_file.path, tgt_file.path, patch_file.path,
+ };
+ ASSERT_EQ(0, imgdiff(args.size(), args.data()));
+
+ std::string tgt;
+ ASSERT_TRUE(android::base::ReadFileToString(tgt_file.path, &tgt));
+
+ // Expect 1 piece of patch due to no matching source entry.
+ GenerateAndCheckSplitTarget(debug_dir.path, 1, tgt);
+}
+
+TEST(ImgdiffTest, zip_mode_large_enough_limit) {
+ // Generate 20 blocks of random data.
+ std::string random_data;
+ random_data.reserve(4096 * 20);
+ generate_n(back_inserter(random_data), 4096 * 20, []() { return rand() % 256; });
+
+ TemporaryFile tgt_file;
+ FILE* tgt_file_ptr = fdopen(tgt_file.fd, "wb");
+ ZipWriter tgt_writer(tgt_file_ptr);
+
+ construct_deflate_entry({ { "a", 0, 10 }, { "b", 10, 5 } }, &tgt_writer, random_data);
+
+ ASSERT_EQ(0, tgt_writer.Finish());
+ ASSERT_EQ(0, fclose(tgt_file_ptr));
+
+ // Construct 10 blocks of source.
+ TemporaryFile src_file;
+ FILE* src_file_ptr = fdopen(src_file.fd, "wb");
+ ZipWriter src_writer(src_file_ptr);
+ construct_deflate_entry({ { "a", 1, 10 } }, &src_writer, random_data);
+ ASSERT_EQ(0, src_writer.Finish());
+ ASSERT_EQ(0, fclose(src_file_ptr));
+
+ // Compute patch with a limit of 20 blocks.
+ TemporaryFile patch_file;
+ TemporaryDir debug_dir;
+ std::vector<const char*> args = {
+ "imgdiff", "-z", "--block-limit=20", android::base::StringPrintf(
+ "--debug-dir=%s", debug_dir.path).c_str(), src_file.path, tgt_file.path, patch_file.path,
+ };
+ ASSERT_EQ(0, imgdiff(args.size(), args.data()));
+
+ std::string tgt;
+ ASSERT_TRUE(android::base::ReadFileToString(tgt_file.path, &tgt));
+
+ // Expect 1 piece of patch since the limit is larger than the zip file size.
+ GenerateAndCheckSplitTarget(debug_dir.path, 1, tgt);
+}
diff --git a/tests/component/install_test.cpp b/tests/component/install_test.cpp
index 968196f..7bb4960 100644
--- a/tests/component/install_test.cpp
+++ b/tests/component/install_test.cpp
@@ -19,6 +19,7 @@
#include <sys/types.h>
#include <unistd.h>
+#include <algorithm>
#include <string>
#include <vector>
@@ -198,8 +199,8 @@
CloseArchive(zip);
}
-TEST(InstallTest, update_binary_command_smoke) {
#ifdef AB_OTA_UPDATER
+static void VerifyAbUpdateBinaryCommand(const std::string& serialno, bool success = true) {
TemporaryFile temp_file;
FILE* zip_file = fdopen(temp_file.fd, "w");
ZipWriter writer(zip_file);
@@ -215,11 +216,13 @@
ASSERT_NE("", device);
std::string timestamp = android::base::GetProperty("ro.build.date.utc", "");
ASSERT_NE("", timestamp);
- std::string metadata = android::base::Join(
- std::vector<std::string>{
- "ota-type=AB", "pre-device=" + device, "post-timestamp=" + timestamp,
- },
- "\n");
+
+ std::vector<std::string> meta{ "ota-type=AB", "pre-device=" + device,
+ "post-timestamp=" + timestamp };
+ if (!serialno.empty()) {
+ meta.push_back("serialno=" + serialno);
+ }
+ std::string metadata = android::base::Join(meta, "\n");
ASSERT_EQ(0, writer.WriteBytes(metadata.data(), metadata.size()));
ASSERT_EQ(0, writer.FinishEntry());
ASSERT_EQ(0, writer.Finish());
@@ -234,14 +237,25 @@
std::string package = "/path/to/update.zip";
std::string binary_path = "/sbin/update_engine_sideload";
std::vector<std::string> cmd;
- ASSERT_EQ(0, update_binary_command(package, zip, binary_path, 0, status_fd, &cmd));
- ASSERT_EQ(5U, cmd.size());
- ASSERT_EQ(binary_path, cmd[0]);
- ASSERT_EQ("--payload=file://" + package, cmd[1]);
- ASSERT_EQ("--offset=" + std::to_string(payload_entry.offset), cmd[2]);
- ASSERT_EQ("--headers=" + properties, cmd[3]);
- ASSERT_EQ("--status_fd=" + std::to_string(status_fd), cmd[4]);
+ if (success) {
+ ASSERT_EQ(0, update_binary_command(package, zip, binary_path, 0, status_fd, &cmd));
+ ASSERT_EQ(5U, cmd.size());
+ ASSERT_EQ(binary_path, cmd[0]);
+ ASSERT_EQ("--payload=file://" + package, cmd[1]);
+ ASSERT_EQ("--offset=" + std::to_string(payload_entry.offset), cmd[2]);
+ ASSERT_EQ("--headers=" + properties, cmd[3]);
+ ASSERT_EQ("--status_fd=" + std::to_string(status_fd), cmd[4]);
+ } else {
+ ASSERT_EQ(INSTALL_ERROR, update_binary_command(package, zip, binary_path, 0, status_fd, &cmd));
+ }
CloseArchive(zip);
+}
+#endif // AB_OTA_UPDATER
+
+TEST(InstallTest, update_binary_command_smoke) {
+#ifdef AB_OTA_UPDATER
+ // Empty serialno will pass the verification.
+ VerifyAbUpdateBinaryCommand({});
#else
TemporaryFile temp_file;
FILE* zip_file = fdopen(temp_file.fd, "w");
@@ -340,3 +354,34 @@
CloseArchive(zip);
#endif // AB_OTA_UPDATER
}
+
+#ifdef AB_OTA_UPDATER
+TEST(InstallTest, update_binary_command_multiple_serialno) {
+ std::string serialno = android::base::GetProperty("ro.serialno", "");
+ ASSERT_NE("", serialno);
+
+ // Single matching serialno will pass the verification.
+ VerifyAbUpdateBinaryCommand(serialno);
+
+ static constexpr char alphabet[] =
+ "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+ auto generator = []() { return alphabet[rand() % (sizeof(alphabet) - 1)]; };
+
+ // Generate 900 random serial numbers.
+ std::string random_serial;
+ for (size_t i = 0; i < 900; i++) {
+ generate_n(back_inserter(random_serial), serialno.size(), generator);
+ random_serial.append("|");
+ }
+ // Random serialnos should fail the verification.
+ VerifyAbUpdateBinaryCommand(random_serial, false);
+
+ std::string long_serial = random_serial + serialno + "|";
+ for (size_t i = 0; i < 99; i++) {
+ generate_n(back_inserter(long_serial), serialno.size(), generator);
+ long_serial.append("|");
+ }
+ // String with the matching serialno should pass the verification.
+ VerifyAbUpdateBinaryCommand(long_serial);
+}
+#endif // AB_OTA_UPDATER
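
The serialno cases above rely on the package metadata carrying a '|'-separated list of acceptable serial numbers. A hedged sketch of that matching rule (illustrative only; the real check lives inside update_binary_command() and may differ in detail):

    #include <string>
    #include <vector>

    #include <android-base/strings.h>

    // Returns true if the device serial number appears in the '|'-separated
    // "serialno=" metadata value. Hypothetical helper, for illustration.
    static bool SerialnoMatches(const std::string& device_serialno, const std::string& metadata_value) {
      for (const auto& token : android::base::Split(metadata_value, "|")) {
        if (token == device_serialno) {
          return true;
        }
      }
      return false;
    }

    // Against the metadata built by VerifyAbUpdateBinaryCommand():
    //   SerialnoMatches(serialno, serialno)      -> true   (single matching entry)
    //   SerialnoMatches(serialno, random_serial) -> false  (900 random entries)
    //   SerialnoMatches(serialno, long_serial)   -> true   (match buried among ~1000 entries)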
diff --git a/tests/unit/dirutil_test.cpp b/tests/unit/dirutil_test.cpp
index 5e2ae4f..7f85d13 100644
--- a/tests/unit/dirutil_test.cpp
+++ b/tests/unit/dirutil_test.cpp
@@ -26,23 +26,23 @@
TEST(DirUtilTest, create_invalid) {
// Requesting to create an empty dir is invalid.
- ASSERT_EQ(-1, dirCreateHierarchy("", 0755, nullptr, false, nullptr));
+ ASSERT_EQ(-1, mkdir_recursively("", 0755, false, nullptr));
ASSERT_EQ(ENOENT, errno);
// Requesting to strip the name with no slash present.
- ASSERT_EQ(-1, dirCreateHierarchy("abc", 0755, nullptr, true, nullptr));
+ ASSERT_EQ(-1, mkdir_recursively("abc", 0755, true, nullptr));
ASSERT_EQ(ENOENT, errno);
// Creating a dir that already exists.
TemporaryDir td;
- ASSERT_EQ(0, dirCreateHierarchy(td.path, 0755, nullptr, false, nullptr));
+ ASSERT_EQ(0, mkdir_recursively(td.path, 0755, false, nullptr));
// "///" is a valid dir.
- ASSERT_EQ(0, dirCreateHierarchy("///", 0755, nullptr, false, nullptr));
+ ASSERT_EQ(0, mkdir_recursively("///", 0755, false, nullptr));
// Request to create a dir, but a file with the same name already exists.
TemporaryFile tf;
- ASSERT_EQ(-1, dirCreateHierarchy(tf.path, 0755, nullptr, false, nullptr));
+ ASSERT_EQ(-1, mkdir_recursively(tf.path, 0755, false, nullptr));
ASSERT_EQ(ENOTDIR, errno);
}
@@ -51,7 +51,7 @@
std::string prefix(td.path);
std::string path = prefix + "/a/b";
constexpr mode_t mode = 0755;
- ASSERT_EQ(0, dirCreateHierarchy(path.c_str(), mode, nullptr, false, nullptr));
+ ASSERT_EQ(0, mkdir_recursively(path, mode, false, nullptr));
// Verify.
struct stat sb;
@@ -69,7 +69,7 @@
TemporaryDir td;
std::string prefix(td.path);
std::string path = prefix + "/a/b";
- ASSERT_EQ(0, dirCreateHierarchy(path.c_str(), 0755, nullptr, true, nullptr));
+ ASSERT_EQ(0, mkdir_recursively(path, 0755, true, nullptr));
// Verify that "../a" exists but not "../a/b".
struct stat sb;
@@ -83,31 +83,21 @@
ASSERT_EQ(0, rmdir((prefix + "/a").c_str()));
}
-TEST(DirUtilTest, create_mode_and_timestamp) {
+TEST(DirUtilTest, create_mode) {
TemporaryDir td;
std::string prefix(td.path);
std::string path = prefix + "/a/b";
- // Set the timestamp to 8/1/2008.
- constexpr struct utimbuf timestamp = { 1217592000, 1217592000 };
constexpr mode_t mode = 0751;
- ASSERT_EQ(0, dirCreateHierarchy(path.c_str(), mode, &timestamp, false, nullptr));
+ ASSERT_EQ(0, mkdir_recursively(path, mode, false, nullptr));
- // Verify the mode and timestamp for "../a/b".
+ // Verify the mode for "../a/b".
struct stat sb;
ASSERT_EQ(0, stat(path.c_str(), &sb)) << strerror(errno);
ASSERT_TRUE(S_ISDIR(sb.st_mode));
constexpr mode_t mask = S_IRWXU | S_IRWXG | S_IRWXO;
ASSERT_EQ(mode, sb.st_mode & mask);
- timespec time;
- time.tv_sec = 1217592000;
- time.tv_nsec = 0;
-
- ASSERT_EQ(time.tv_sec, static_cast<long>(sb.st_atime));
- ASSERT_EQ(time.tv_sec, static_cast<long>(sb.st_mtime));
-
- // Verify the mode for "../a". Note that the timestamp for intermediate directories (e.g. "../a")
- // may not be 'timestamp' according to the current implementation.
+ // Verify the mode for "../a".
ASSERT_EQ(0, stat((prefix + "/a").c_str(), &sb)) << strerror(errno);
ASSERT_TRUE(S_ISDIR(sb.st_mode));
ASSERT_EQ(mode, sb.st_mode & mask);
@@ -116,35 +106,3 @@
ASSERT_EQ(0, rmdir((prefix + "/a/b").c_str()));
ASSERT_EQ(0, rmdir((prefix + "/a").c_str()));
}
-
-TEST(DirUtilTest, unlink_invalid) {
- // File doesn't exist.
- ASSERT_EQ(-1, dirUnlinkHierarchy("doesntexist"));
-
- // Nonexistent directory.
- TemporaryDir td;
- std::string path(td.path);
- ASSERT_EQ(-1, dirUnlinkHierarchy((path + "/a").c_str()));
- ASSERT_EQ(ENOENT, errno);
-}
-
-TEST(DirUtilTest, unlink_smoke) {
- // Unlink a file.
- TemporaryFile tf;
- ASSERT_EQ(0, dirUnlinkHierarchy(tf.path));
- ASSERT_EQ(-1, access(tf.path, F_OK));
-
- TemporaryDir td;
- std::string path(td.path);
- constexpr mode_t mode = 0700;
- ASSERT_EQ(0, mkdir((path + "/a").c_str(), mode));
- ASSERT_EQ(0, mkdir((path + "/a/b").c_str(), mode));
- ASSERT_EQ(0, mkdir((path + "/a/b/c").c_str(), mode));
- ASSERT_EQ(0, mkdir((path + "/a/d").c_str(), mode));
-
- // Remove "../a" recursively.
- ASSERT_EQ(0, dirUnlinkHierarchy((path + "/a").c_str()));
-
- // Verify it's gone.
- ASSERT_EQ(-1, access((path + "/a").c_str(), F_OK));
-}
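
The tests now target mkdir_recursively(), which drops dirCreateHierarchy()'s timestamp parameter. As a rough mental model only, here is a hypothetical recursive-mkdir sketch with the shape these tests exercise (the real helper takes an additional argument and may differ in detail):

    #include <errno.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    #include <string>

    // Hypothetical sketch: create 'input' and any missing parents with 'mode'; when
    // 'strip_last_component' is set, create only the parents of 'input'.
    static int MkdirRecursivelySketch(const std::string& input, mode_t mode,
                                      bool strip_last_component) {
      std::string path = input;
      if (strip_last_component) {
        size_t pos = path.find_last_of('/');
        if (pos == std::string::npos) {
          errno = ENOENT;  // Nothing to strip: matches the "abc" case above.
          return -1;
        }
        path = path.substr(0, pos);
      }
      if (path.empty()) {
        errno = ENOENT;  // Empty request: matches the "" case above.
        return -1;
      }

      // Walk the path left to right, creating each prefix directory as needed.
      size_t prev_end = 0;
      while (prev_end < path.size()) {
        size_t next_end = path.find('/', prev_end + 1);
        std::string dir = (next_end == std::string::npos) ? path : path.substr(0, next_end);
        if (mkdir(dir.c_str(), mode) == -1) {
          if (errno != EEXIST) return -1;
          struct stat st;
          if (stat(dir.c_str(), &st) == -1) return -1;
          if (!S_ISDIR(st.st_mode)) {
            errno = ENOTDIR;  // An existing non-directory: matches the TemporaryFile case.
            return -1;
          }
        }
        if (next_end == std::string::npos) break;
        prev_end = next_end;
      }
      return 0;
    }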
diff --git a/tests/unit/rangeset_test.cpp b/tests/unit/rangeset_test.cpp
index 3c6d77e..15bcec8 100644
--- a/tests/unit/rangeset_test.cpp
+++ b/tests/unit/rangeset_test.cpp
@@ -21,7 +21,7 @@
#include <gtest/gtest.h>
-#include "updater/rangeset.h"
+#include "rangeset.h"
TEST(RangeSetTest, Parse_smoke) {
RangeSet rs = RangeSet::Parse("2,1,10");
@@ -110,3 +110,50 @@
}
ASSERT_EQ((std::vector<Range>{ Range{ 8, 10 }, Range{ 1, 5 } }), ranges);
}
+
+TEST(RangeSetTest, tostring) {
+ ASSERT_EQ("2,1,6", RangeSet::Parse("2,1,6").ToString());
+ ASSERT_EQ("4,1,5,8,10", RangeSet::Parse("4,1,5,8,10").ToString());
+ ASSERT_EQ("6,1,3,4,6,15,22", RangeSet::Parse("6,1,3,4,6,15,22").ToString());
+}
+
+TEST(SortedRangeSetTest, insertion) {
+ SortedRangeSet rs({ { 2, 3 }, { 4, 6 }, { 8, 14 } });
+ rs.Insert({ 1, 2 });
+ ASSERT_EQ(SortedRangeSet({ { 1, 3 }, { 4, 6 }, { 8, 14 } }), rs);
+ ASSERT_EQ(static_cast<size_t>(10), rs.blocks());
+ rs.Insert({ 3, 5 });
+ ASSERT_EQ(SortedRangeSet({ { 1, 6 }, { 8, 14 } }), rs);
+ ASSERT_EQ(static_cast<size_t>(11), rs.blocks());
+
+ SortedRangeSet r1({ { 20, 22 }, { 15, 18 } });
+ rs.Insert(r1);
+ ASSERT_EQ(SortedRangeSet({ { 1, 6 }, { 8, 14 }, { 15, 18 }, { 20, 22 } }), rs);
+ ASSERT_EQ(static_cast<size_t>(16), rs.blocks());
+
+ SortedRangeSet r2({ { 2, 7 }, { 15, 21 }, { 20, 25 } });
+ rs.Insert(r2);
+ ASSERT_EQ(SortedRangeSet({ { 1, 7 }, { 8, 14 }, { 15, 25 } }), rs);
+ ASSERT_EQ(static_cast<size_t>(22), rs.blocks());
+}
+
+TEST(SortedRangeSetTest, file_range) {
+ SortedRangeSet rs;
+ rs.Insert(4096, 4096);
+ ASSERT_EQ(SortedRangeSet({ { 1, 2 } }), rs);
+ // insert blocks 2-9
+ rs.Insert(4096 * 3 - 1, 4096 * 7);
+ ASSERT_EQ(SortedRangeSet({ { 1, 10 } }), rs);
+ // insert blocks 15-19
+ rs.Insert(4096 * 15 + 1, 4096 * 4);
+ ASSERT_EQ(SortedRangeSet({ { 1, 10 }, { 15, 20 } }), rs);
+
+ // rs overlaps block 2-2
+ ASSERT_TRUE(rs.Overlaps(4096 * 2 - 1, 10));
+ ASSERT_FALSE(rs.Overlaps(4096 * 10, 4096 * 5));
+
+ ASSERT_EQ(static_cast<size_t>(10), rs.GetOffsetInRangeSet(4106));
+ ASSERT_EQ(static_cast<size_t>(40970), rs.GetOffsetInRangeSet(4096 * 16 + 10));
+ // block#10 not in range.
+ ASSERT_EXIT(rs.GetOffsetInRangeSet(40970), ::testing::KilledBySignal(SIGABRT), "");
+}
\ No newline at end of file
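
The file_range expectations above come down to mapping byte ranges onto 4096-byte blocks with half-closed, half-open [start, end) bounds. A small illustrative sketch of that arithmetic (not the actual SortedRangeSet implementation):

    #include <stddef.h>

    #include <utility>

    constexpr size_t kBlockSize = 4096;

    // Maps the byte range [offset, offset + length) onto the blocks it touches,
    // returned as a half-open block range [start, end). Assumes length > 0.
    static std::pair<size_t, size_t> ByteRangeToBlocks(size_t offset, size_t length) {
      size_t start_block = offset / kBlockSize;
      size_t end_block = (offset + length - 1) / kBlockSize + 1;
      return { start_block, end_block };
    }

    // ByteRangeToBlocks(4096, 4096)              == {1, 2}   -> SortedRangeSet{{1, 2}}
    // ByteRangeToBlocks(4096 * 3 - 1, 4096 * 7)  == {2, 10}  -> merges into {{1, 10}}
    // ByteRangeToBlocks(4096 * 15 + 1, 4096 * 4) == {15, 20} -> adds {15, 20}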
diff --git a/tools/recovery_l10n/res/values-en-rCA/strings.xml b/tools/recovery_l10n/res/values-en-rCA/strings.xml
deleted file mode 100644
index dc75c23..0000000
--- a/tools/recovery_l10n/res/values-en-rCA/strings.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<resources xmlns:android="http://schemas.android.com/apk/res/android"
- xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
- <string name="recovery_installing" msgid="2013591905463558223">"Installing system update"</string>
- <string name="recovery_erasing" msgid="7334826894904037088">"Erasing"</string>
- <string name="recovery_no_command" msgid="4465476568623024327">"No command"</string>
- <string name="recovery_error" msgid="5748178989622716736">"Error!"</string>
- <string name="recovery_installing_security" msgid="9184031299717114342">"Installing security update"</string>
-</resources>
diff --git a/tools/recovery_l10n/res/values-en-rXC/strings.xml b/tools/recovery_l10n/res/values-en-rXC/strings.xml
deleted file mode 100644
index 2d528b3..0000000
--- a/tools/recovery_l10n/res/values-en-rXC/strings.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<resources xmlns:android="http://schemas.android.com/apk/res/android"
- xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
- <string name="recovery_installing" msgid="2013591905463558223">"Installing system update"</string>
- <string name="recovery_erasing" msgid="7334826894904037088">"Erasing"</string>
- <string name="recovery_no_command" msgid="4465476568623024327">"No command"</string>
- <string name="recovery_error" msgid="5748178989622716736">"Error!"</string>
- <string name="recovery_installing_security" msgid="9184031299717114342">"Installing security update"</string>
-</resources>
diff --git a/tools/recovery_l10n/res/values-hi/strings.xml b/tools/recovery_l10n/res/values-hi/strings.xml
index 65d0033..a8a876e 100644
--- a/tools/recovery_l10n/res/values-hi/strings.xml
+++ b/tools/recovery_l10n/res/values-hi/strings.xml
@@ -3,7 +3,7 @@
xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
<string name="recovery_installing" msgid="2013591905463558223">"सिस्टम अपडेट इंस्टॉल किया जा रहा है"</string>
<string name="recovery_erasing" msgid="7334826894904037088">"मिटाया जा रहा है"</string>
- <string name="recovery_no_command" msgid="4465476568623024327">"कोई निर्देश नहीं मिला"</string>
+ <string name="recovery_no_command" msgid="4465476568623024327">"कोई आदेश नहीं"</string>
<string name="recovery_error" msgid="5748178989622716736">"गड़बड़ी!"</string>
<string name="recovery_installing_security" msgid="9184031299717114342">"सुरक्षा अपडेट इंस्टॉल किया जा रहा है"</string>
</resources>
diff --git a/tools/recovery_l10n/res/values-mr/strings.xml b/tools/recovery_l10n/res/values-mr/strings.xml
index 5f82033..8cf86f7 100644
--- a/tools/recovery_l10n/res/values-mr/strings.xml
+++ b/tools/recovery_l10n/res/values-mr/strings.xml
@@ -1,9 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<resources xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
- <string name="recovery_installing" msgid="2013591905463558223">"सिस्टम अपडेट इंस्टॉल करत आहे"</string>
+ <string name="recovery_installing" msgid="2013591905463558223">"सिस्टम अद्यतन स्थापित करीत आहे"</string>
<string name="recovery_erasing" msgid="7334826894904037088">"मिटवत आहे"</string>
- <string name="recovery_no_command" msgid="4465476568623024327">"कोणतीही कमांड नाही"</string>
- <string name="recovery_error" msgid="5748178989622716736">"एरर!"</string>
- <string name="recovery_installing_security" msgid="9184031299717114342">"सुरक्षा अपडेट इंस्टॉल करत आहे"</string>
+ <string name="recovery_no_command" msgid="4465476568623024327">"कोणताही आदेश नाही"</string>
+ <string name="recovery_error" msgid="5748178989622716736">"त्रुटी!"</string>
+ <string name="recovery_installing_security" msgid="9184031299717114342">"सुरक्षा अद्यतन स्थापित करीत आहे"</string>
</resources>
diff --git a/tools/recovery_l10n/res/values-te/strings.xml b/tools/recovery_l10n/res/values-te/strings.xml
index e35c82b..cfb02c9 100644
--- a/tools/recovery_l10n/res/values-te/strings.xml
+++ b/tools/recovery_l10n/res/values-te/strings.xml
@@ -4,6 +4,6 @@
<string name="recovery_installing" msgid="2013591905463558223">"సిస్టమ్ నవీకరణను ఇన్స్టాల్ చేస్తోంది"</string>
<string name="recovery_erasing" msgid="7334826894904037088">"డేటాను తొలగిస్తోంది"</string>
<string name="recovery_no_command" msgid="4465476568623024327">"ఆదేశం లేదు"</string>
- <string name="recovery_error" msgid="5748178989622716736">"ఎర్రర్ సంభవించింది!"</string>
+ <string name="recovery_error" msgid="5748178989622716736">"లోపం సంభవించింది!"</string>
<string name="recovery_installing_security" msgid="9184031299717114342">"భద్రతా నవీకరణను ఇన్స్టాల్ చేస్తోంది"</string>
</resources>
diff --git a/uncrypt/Android.mk b/uncrypt/Android.mk
index 59084b0..a3b0ca9 100644
--- a/uncrypt/Android.mk
+++ b/uncrypt/Android.mk
@@ -16,7 +16,6 @@
include $(CLEAR_VARS)
-LOCAL_CLANG := true
LOCAL_SRC_FILES := uncrypt.cpp
LOCAL_C_INCLUDES := $(LOCAL_PATH)/..
LOCAL_MODULE := uncrypt
@@ -26,7 +25,7 @@
liblog \
libfs_mgr \
libcutils
-LOCAL_CFLAGS := -Werror
+LOCAL_CFLAGS := -Wall -Werror
LOCAL_INIT_RC := uncrypt.rc
include $(BUILD_EXECUTABLE)
diff --git a/uncrypt/uncrypt.cpp b/uncrypt/uncrypt.cpp
index ad3bdce..7a2ccbc 100644
--- a/uncrypt/uncrypt.cpp
+++ b/uncrypt/uncrypt.cpp
@@ -448,20 +448,20 @@
static int uncrypt(const char* input_path, const char* map_file, const int socket) {
LOG(INFO) << "update package is \"" << input_path << "\"";
- // Turn the name of the file we're supposed to convert into an
- // absolute path, so we can find what filesystem it's on.
+ // Turn the name of the file we're supposed to convert into an absolute path, so we can find
+ // what filesystem it's on.
char path[PATH_MAX+1];
- if (realpath(input_path, path) == NULL) {
+ if (realpath(input_path, path) == nullptr) {
PLOG(ERROR) << "failed to convert \"" << input_path << "\" to absolute path";
- return 1;
+ return kUncryptRealpathFindError;
}
bool encryptable;
bool encrypted;
const char* blk_dev = find_block_device(path, &encryptable, &encrypted);
- if (blk_dev == NULL) {
+ if (blk_dev == nullptr) {
LOG(ERROR) << "failed to find block device for " << path;
- return 1;
+ return kUncryptBlockDeviceFindError;
}
// If the filesystem it's on isn't encrypted, we only produce the
diff --git a/update_verifier/update_verifier_main.cpp b/update_verifier/update_verifier_main.cpp
index 46e8bbb..9dd5a0c 100644
--- a/update_verifier/update_verifier_main.cpp
+++ b/update_verifier/update_verifier_main.cpp
@@ -16,8 +16,14 @@
// See the comments in update_verifier.cpp.
+#include <android-base/logging.h>
+
#include "update_verifier/update_verifier.h"
int main(int argc, char** argv) {
+ // Set up update_verifier logging to be written to kmsg, because logd may not be running during
+ // boot time.
+ android::base::InitLogging(argv, &android::base::KernelLogger);
+
return update_verifier(argc, argv);
}
diff --git a/updater/Android.mk b/updater/Android.mk
index 86dc48e..1218160 100644
--- a/updater/Android.mk
+++ b/updater/Android.mk
@@ -66,6 +66,7 @@
external/e2fsprogs/misc
LOCAL_CFLAGS := \
+ -Wall \
-Wno-unused-parameter \
-Werror
@@ -91,6 +92,7 @@
$(LOCAL_PATH)/include
LOCAL_CFLAGS := \
+ -Wall \
-Wno-unused-parameter \
-Werror
diff --git a/updater/blockimg.cpp b/updater/blockimg.cpp
index a0b9ad2..fe21dd0 100644
--- a/updater/blockimg.cpp
+++ b/updater/blockimg.cpp
@@ -53,8 +53,8 @@
#include "error_code.h"
#include "ota_io.h"
#include "print_sha1.h"
+#include "rangeset.h"
#include "updater/install.h"
-#include "updater/rangeset.h"
#include "updater/updater.h"
// Set this to 0 to interpret 'erase' transfers to mean do a
diff --git a/updater/include/updater/rangeset.h b/updater/include/updater/rangeset.h
deleted file mode 100644
index fad0380..0000000
--- a/updater/include/updater/rangeset.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <stddef.h>
-
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <android-base/logging.h>
-#include <android-base/parseint.h>
-#include <android-base/strings.h>
-
-using Range = std::pair<size_t, size_t>;
-
-class RangeSet {
- public:
- RangeSet() : blocks_(0) {}
-
- explicit RangeSet(std::vector<Range>&& pairs) {
- CHECK_NE(pairs.size(), static_cast<size_t>(0)) << "Invalid number of tokens";
-
- // Sanity check the input.
- size_t result = 0;
- for (const auto& range : pairs) {
- CHECK_LT(range.first, range.second)
- << "Empty or negative range: " << range.first << ", " << range.second;
- size_t sz = range.second - range.first;
- CHECK_LE(result, SIZE_MAX - sz) << "RangeSet size overflow";
- result += sz;
- }
-
- ranges_ = pairs;
- blocks_ = result;
- }
-
- static RangeSet Parse(const std::string& range_text) {
- std::vector<std::string> pieces = android::base::Split(range_text, ",");
- CHECK_GE(pieces.size(), static_cast<size_t>(3)) << "Invalid range text: " << range_text;
-
- size_t num;
- CHECK(android::base::ParseUint(pieces[0], &num, static_cast<size_t>(INT_MAX)))
- << "Failed to parse the number of tokens: " << range_text;
-
- CHECK_NE(num, static_cast<size_t>(0)) << "Invalid number of tokens: " << range_text;
- CHECK_EQ(num % 2, static_cast<size_t>(0)) << "Number of tokens must be even: " << range_text;
- CHECK_EQ(num, pieces.size() - 1) << "Mismatching number of tokens: " << range_text;
-
- std::vector<Range> pairs;
- for (size_t i = 0; i < num; i += 2) {
- size_t first;
- CHECK(android::base::ParseUint(pieces[i + 1], &first, static_cast<size_t>(INT_MAX)));
- size_t second;
- CHECK(android::base::ParseUint(pieces[i + 2], &second, static_cast<size_t>(INT_MAX)));
-
- pairs.emplace_back(first, second);
- }
-
- return RangeSet(std::move(pairs));
- }
-
- // Get the block number for the i-th (starting from 0) block in the RangeSet.
- size_t GetBlockNumber(size_t idx) const {
- CHECK_LT(idx, blocks_) << "Out of bound index " << idx << " (total blocks: " << blocks_ << ")";
-
- for (const auto& range : ranges_) {
- if (idx < range.second - range.first) {
- return range.first + idx;
- }
- idx -= (range.second - range.first);
- }
-
- CHECK(false) << "Failed to find block number for index " << idx;
- return 0; // Unreachable, but to make compiler happy.
- }
-
- // RangeSet has half-closed half-open bounds. For example, "3,5" contains blocks 3 and 4. So "3,5"
- // and "5,7" are not overlapped.
- bool Overlaps(const RangeSet& other) const {
- for (const auto& range : ranges_) {
- size_t start = range.first;
- size_t end = range.second;
- for (const auto& other_range : other.ranges_) {
- size_t other_start = other_range.first;
- size_t other_end = other_range.second;
- // [start, end) vs [other_start, other_end)
- if (!(other_start >= end || start >= other_end)) {
- return true;
- }
- }
- }
- return false;
- }
-
- // size() gives the number of Range's in this RangeSet.
- size_t size() const {
- return ranges_.size();
- }
-
- // blocks() gives the number of all blocks in this RangeSet.
- size_t blocks() const {
- return blocks_;
- }
-
- // We provide const iterators only.
- std::vector<Range>::const_iterator cbegin() const {
- return ranges_.cbegin();
- }
-
- std::vector<Range>::const_iterator cend() const {
- return ranges_.cend();
- }
-
- // Need to provide begin()/end() since range-based loop expects begin()/end().
- std::vector<Range>::const_iterator begin() const {
- return ranges_.cbegin();
- }
-
- std::vector<Range>::const_iterator end() const {
- return ranges_.cend();
- }
-
- // Reverse const iterators for MoveRange().
- std::vector<Range>::const_reverse_iterator crbegin() const {
- return ranges_.crbegin();
- }
-
- std::vector<Range>::const_reverse_iterator crend() const {
- return ranges_.crend();
- }
-
- const Range& operator[](size_t i) const {
- return ranges_[i];
- }
-
- bool operator==(const RangeSet& other) const {
- // The orders of Range's matter. "4,1,5,8,10" != "4,8,10,1,5".
- return (ranges_ == other.ranges_);
- }
-
- bool operator!=(const RangeSet& other) const {
- return ranges_ != other.ranges_;
- }
-
- private:
- // Actual limit for each value and the total number are both INT_MAX.
- std::vector<Range> ranges_;
- size_t blocks_;
-};
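
Since rangeset.h only moves out of updater/include/ (the unit test and blockimg.cpp now include plain "rangeset.h"), here is a quick usage sketch based on the semantics documented in the header above; purely illustrative:

    #include <android-base/logging.h>

    #include "rangeset.h"  // Included the same way as in tests/unit/rangeset_test.cpp above.

    void RangeSetExampleSketch() {
      // "2,1,10" encodes one pair of bounds: the half-closed, half-open block range [1, 10).
      RangeSet rs = RangeSet::Parse("2,1,10");
      CHECK_EQ(static_cast<size_t>(1), rs.size());    // One Range.
      CHECK_EQ(static_cast<size_t>(9), rs.blocks());  // Blocks 1 through 9.
      CHECK_EQ(static_cast<size_t>(1), rs.GetBlockNumber(0));
      CHECK_EQ(static_cast<size_t>(9), rs.GetBlockNumber(8));
      // [1, 10) and [10, 12) share no blocks, so they don't overlap.
      CHECK(!rs.Overlaps(RangeSet::Parse("2,10,12")));
    }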
diff --git a/updater/install.cpp b/updater/install.cpp
index bfe91e7..8e54c2e 100644
--- a/updater/install.cpp
+++ b/updater/install.cpp
@@ -95,34 +95,6 @@
uiPrint(state, error_msg);
}
-static bool is_dir(const std::string& dirpath) {
- struct stat st;
- return stat(dirpath.c_str(), &st) == 0 && S_ISDIR(st.st_mode);
-}
-
-// Create all parent directories of name, if necessary.
-static bool make_parents(const std::string& name) {
- size_t prev_end = 0;
- while (prev_end < name.size()) {
- size_t next_end = name.find('/', prev_end + 1);
- if (next_end == std::string::npos) {
- break;
- }
- std::string dir_path = name.substr(0, next_end);
- if (!is_dir(dir_path)) {
- int result = mkdir(dir_path.c_str(), 0700);
- if (result != 0) {
- PLOG(ERROR) << "failed to mkdir " << dir_path << " when make parents for " << name;
- return false;
- }
-
- LOG(INFO) << "created [" << dir_path << "]";
- }
- prev_end = next_end;
- }
- return true;
-}
-
// mount(fs_type, partition_type, location, mount_point)
// mount(fs_type, partition_type, location, mount_point, mount_options)
diff --git a/wear_ui.cpp b/wear_ui.cpp
index 624116c..1859b13 100644
--- a/wear_ui.cpp
+++ b/wear_ui.cpp
@@ -16,40 +16,16 @@
#include "wear_ui.h"
-#include <errno.h>
-#include <fcntl.h>
-#include <stdarg.h>
-#include <stdlib.h>
+#include <pthread.h>
+#include <stdio.h> // TODO: Remove after killing the call to sprintf().
#include <string.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
#include <string>
-#include <vector>
#include <android-base/properties.h>
#include <android-base/strings.h>
-#include <android-base/stringprintf.h>
#include <minui/minui.h>
-#include "common.h"
-#include "device.h"
-
-// There's only (at most) one of these objects, and global callbacks
-// (for pthread_create, and the input event system) need to find it,
-// so use a global variable.
-static WearRecoveryUI* self = NULL;
-
-// Return the current time as a double (including fractions of a second).
-static double now() {
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return tv.tv_sec + tv.tv_usec / 1000000.0;
-}
-
WearRecoveryUI::WearRecoveryUI()
: kProgressBarBaseline(RECOVERY_UI_PROGRESS_BAR_BASELINE),
kMenuUnusableRows(RECOVERY_UI_MENU_UNUSABLE_ROWS) {
@@ -61,10 +37,6 @@
loop_frames = 60;
touch_screen_allowed_ = true;
-
- for (size_t i = 0; i < 5; i++) backgroundIcon[i] = NULL;
-
- self = this;
}
int WearRecoveryUI::GetProgressBaseline() const {
@@ -80,24 +52,12 @@
gr_fill(0, 0, gr_fb_width(), gr_fb_height());
if (currentIcon != NONE) {
- GRSurface* surface;
- if (currentIcon == INSTALLING_UPDATE || currentIcon == ERASING) {
- if (!intro_done) {
- surface = introFrames[current_frame];
- } else {
- surface = loopFrames[current_frame];
- }
- } else {
- surface = backgroundIcon[currentIcon];
- }
-
- int width = gr_get_width(surface);
- int height = gr_get_height(surface);
-
- int x = (gr_fb_width() - width) / 2;
- int y = (gr_fb_height() - height) / 2;
-
- gr_blit(surface, 0, 0, width, height, x, y);
+ GRSurface* frame = GetCurrentFrame();
+ int frame_width = gr_get_width(frame);
+ int frame_height = gr_get_height(frame);
+ int frame_x = (gr_fb_width() - frame_width) / 2;
+ int frame_y = (gr_fb_height() - frame_height) / 2;
+ gr_blit(frame, 0, 0, frame_width, frame_height, frame_x, frame_y);
}
}
@@ -173,7 +133,6 @@
// display from the bottom up, until we hit the top of the
// screen, the bottom of the menu, or we've displayed the
// entire text buffer.
- int ty;
int row = (text_top_ + text_rows_ - 1) % text_rows_;
size_t count = 0;
for (int ty = gr_fb_height() - char_height_ - kMarginHeight; ty > y + 2 && count < text_rows_;
@@ -191,65 +150,9 @@
gr_flip();
}
-bool WearRecoveryUI::InitTextParams() {
- if (!ScreenRecoveryUI::InitTextParams()) {
- return false;
- }
-
- text_cols_ = (gr_fb_width() - (kMarginWidth * 2)) / char_width_;
-
- if (text_rows_ > kMaxRows) text_rows_ = kMaxRows;
- if (text_cols_ > kMaxCols) text_cols_ = kMaxCols;
-
- visible_text_rows = (gr_fb_height() - (kMarginHeight * 2)) / char_height_;
- return true;
-}
-
-bool WearRecoveryUI::Init(const std::string& locale) {
- if (!ScreenRecoveryUI::Init(locale)) {
- return false;
- }
-
- LoadBitmap("icon_error", &backgroundIcon[ERROR]);
- backgroundIcon[NO_COMMAND] = backgroundIcon[ERROR];
-
- // This leaves backgroundIcon[INSTALLING_UPDATE] and backgroundIcon[ERASING]
- // as NULL which is fine since draw_background_locked() doesn't use them.
-
- return true;
-}
-
void WearRecoveryUI::SetStage(int current, int max) {
}
-void WearRecoveryUI::Print(const char* fmt, ...) {
- char buf[256];
- va_list ap;
- va_start(ap, fmt);
- vsnprintf(buf, 256, fmt, ap);
- va_end(ap);
-
- fputs(buf, stdout);
-
- // This can get called before ui_init(), so be careful.
- pthread_mutex_lock(&updateMutex);
- if (text_rows_ > 0 && text_cols_ > 0) {
- char* ptr;
- for (ptr = buf; *ptr != '\0'; ++ptr) {
- if (*ptr == '\n' || text_col_ >= text_cols_) {
- text_[text_row_][text_col_] = '\0';
- text_col_ = 0;
- text_row_ = (text_row_ + 1) % text_rows_;
- if (text_row_ == text_top_) text_top_ = (text_top_ + 1) % text_rows_;
- }
- if (*ptr != '\n') text_[text_row_][text_col_++] = *ptr;
- }
- text_[text_row_][text_col_] = '\0';
- update_screen_locked();
- }
- pthread_mutex_unlock(&updateMutex);
-}
-
void WearRecoveryUI::StartMenu(const char* const* headers, const char* const* items,
int initial_selection) {
pthread_mutex_lock(&updateMutex);
@@ -268,7 +171,7 @@
show_menu = true;
menu_sel = initial_selection;
menu_start = 0;
- menu_end = visible_text_rows - 1 - kMenuUnusableRows;
+ menu_end = text_rows_ - 1 - kMenuUnusableRows;
if (menu_items <= menu_end) menu_end = menu_items;
update_screen_locked();
}
@@ -296,106 +199,3 @@
pthread_mutex_unlock(&updateMutex);
return sel;
}
-
-void WearRecoveryUI::ShowFile(FILE* fp) {
- std::vector<off_t> offsets;
- offsets.push_back(ftello(fp));
- ClearText();
-
- struct stat sb;
- fstat(fileno(fp), &sb);
-
- bool show_prompt = false;
- while (true) {
- if (show_prompt) {
- Print("--(%d%% of %d bytes)--",
- static_cast<int>(100 * (double(ftello(fp)) / double(sb.st_size))),
- static_cast<int>(sb.st_size));
- Redraw();
- while (show_prompt) {
- show_prompt = false;
- int key = WaitKey();
- if (key == KEY_POWER || key == KEY_ENTER) {
- return;
- } else if (key == KEY_UP || key == KEY_VOLUMEUP) {
- if (offsets.size() <= 1) {
- show_prompt = true;
- } else {
- offsets.pop_back();
- fseek(fp, offsets.back(), SEEK_SET);
- }
- } else {
- if (feof(fp)) {
- return;
- }
- offsets.push_back(ftello(fp));
- }
- }
- ClearText();
- }
-
- int ch = getc(fp);
- if (ch == EOF) {
- text_row_ = text_top_ = text_rows_ - 2;
- show_prompt = true;
- } else {
- PutChar(ch);
- if (text_col_ == 0 && text_row_ >= text_rows_ - 2) {
- text_top_ = text_row_;
- show_prompt = true;
- }
- }
- }
-}
-
-void WearRecoveryUI::PutChar(char ch) {
- pthread_mutex_lock(&updateMutex);
- if (ch != '\n') text_[text_row_][text_col_++] = ch;
- if (ch == '\n' || text_col_ >= text_cols_) {
- text_col_ = 0;
- ++text_row_;
- }
- pthread_mutex_unlock(&updateMutex);
-}
-
-void WearRecoveryUI::ShowFile(const char* filename) {
- FILE* fp = fopen_path(filename, "re");
- if (fp == nullptr) {
- Print(" Unable to open %s: %s\n", filename, strerror(errno));
- return;
- }
- ShowFile(fp);
- fclose(fp);
-}
-
-void WearRecoveryUI::PrintOnScreenOnly(const char *fmt, ...) {
- va_list ap;
- va_start(ap, fmt);
- PrintV(fmt, false, ap);
- va_end(ap);
-}
-
-void WearRecoveryUI::PrintV(const char* fmt, bool copy_to_stdout, va_list ap) {
- std::string str;
- android::base::StringAppendV(&str, fmt, ap);
-
- if (copy_to_stdout) {
- fputs(str.c_str(), stdout);
- }
-
- pthread_mutex_lock(&updateMutex);
- if (text_rows_ > 0 && text_cols_ > 0) {
- for (const char* ptr = str.c_str(); *ptr != '\0'; ++ptr) {
- if (*ptr == '\n' || text_col_ >= text_cols_) {
- text_[text_row_][text_col_] = '\0';
- text_col_ = 0;
- text_row_ = (text_row_ + 1) % text_rows_;
- if (text_row_ == text_top_) text_top_ = (text_top_ + 1) % text_rows_;
- }
- if (*ptr != '\n') text_[text_row_][text_col_++] = *ptr;
- }
- text_[text_row_][text_col_] = '\0';
- update_screen_locked();
- }
- pthread_mutex_unlock(&updateMutex);
-}
diff --git a/wear_ui.h b/wear_ui.h
index 3bd90b6..739b4cb 100644
--- a/wear_ui.h
+++ b/wear_ui.h
@@ -19,22 +19,12 @@
#include "screen_ui.h"
-#include <string>
-
class WearRecoveryUI : public ScreenRecoveryUI {
public:
WearRecoveryUI();
- bool Init(const std::string& locale) override;
-
void SetStage(int current, int max) override;
- // printing messages
- void Print(const char* fmt, ...) override;
- void PrintOnScreenOnly(const char* fmt, ...) override __printflike(2, 3);
- void ShowFile(const char* filename) override;
- void ShowFile(FILE* fp) override;
-
// menu display
void StartMenu(const char* const* headers, const char* const* items,
int initial_selection) override;
@@ -50,30 +40,13 @@
int GetProgressBaseline() const override;
- bool InitTextParams() override;
-
void update_progress_locked() override;
- void PrintV(const char*, bool, va_list) override;
-
private:
- GRSurface* backgroundIcon[5];
-
- static const int kMaxCols = 96;
- static const int kMaxRows = 96;
-
- // Number of text rows seen on screen
- int visible_text_rows;
-
- const char* const* menu_headers_;
- int menu_start, menu_end;
-
- pthread_t progress_t;
-
void draw_background_locked() override;
void draw_screen_locked() override;
- void PutChar(char);
+ int menu_start, menu_end;
};
#endif // RECOVERY_WEAR_UI_H