-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathbspatch.zig
More file actions
316 lines (263 loc) · 13.2 KB
/
bspatch.zig
File metadata and controls
316 lines (263 loc) · 13.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
// Copyright 2003-2005 Colin Percival
// Copyright 2024 Yoav Givati
// All rights reserved
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted providing that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// Note: This is a modified version of bspatch.
// The goal is to leverage zig's vector and slice operations to speed up the patching process.
// Beyond a compatible port, this explores an iteration on the bsdiff patch file format and patch
// generation to improve performance and compression ratios, e.g. using zstd, compressing all
// the blocks together, modern suffix sorting, and so on.
const std = @import("std");
const builtin = @import("builtin");
const zstd = @cImport({
@cInclude("zstd.h");
});
const vectorSize = std.simd.suggestVectorLength(u8) orelse 4;
/// CLI entry point: bspatch <oldFilePath> <newFilePath> <patchFilePath>.
/// Reads the old file and the patch into memory, applies the patch, and
/// writes the reconstructed file to <newFilePath>.
pub fn main() !void {
    var allocator = std.heap.page_allocator;

    var argIter = try std.process.argsWithAllocator(allocator);
    defer argIter.deinit();

    // Discard argv[0] (the executable name).
    _ = argIter.skip();

    const oldPath = argIter.next() orelse "";
    const outPath = argIter.next() orelse "";
    const patchPath = argIter.next() orelse "";
    if (oldPath.len == 0 or outPath.len == 0 or patchPath.len == 0) {
        std.debug.print("Usage: bsdiff <oldFilePath> <newFilePath> <patchFilePath>\n", .{});
        std.process.exit(1);
    }

    // Slurp the whole old file.
    const oldHandle = try std.fs.cwd().openFile(oldPath, .{ .mode = .read_only });
    defer oldHandle.close();
    const oldBytes = try allocator.alloc(u8, try oldHandle.getEndPos());
    defer allocator.free(oldBytes);
    _ = try oldHandle.readAll(oldBytes);

    // Slurp the whole patch file.
    const patchHandle = try std.fs.cwd().openFile(patchPath, .{ .mode = .read_only });
    defer patchHandle.close();
    const patchBytes = try allocator.alloc(u8, try patchHandle.getEndPos());
    defer allocator.free(patchBytes);
    _ = try patchHandle.readAll(patchBytes);

    // Apply the patch and write the result out.
    const patched = try applyPatch(&allocator, oldBytes, patchBytes);
    const outHandle = try std.fs.cwd().createFile(outPath, .{});
    defer outHandle.close();
    _ = try outHandle.writeAll(patched);
}
/// Decompression result for parallel decompression.
/// One instance is filled in by each `decompressThread` worker; read only
/// after the corresponding thread has been joined.
const DecompressResult = struct {
    // The destination buffer the worker decompressed into (the same slice
    // that was handed to the worker).
    data: []u8,
    // Number of valid decompressed bytes in `data`; 0 when `err` is true.
    len: usize,
    // True when zstd reported a decompression error for this block.
    err: bool,
};
/// Worker for parallel decompression: zstd-decompresses `compressed` into
/// `buffer` and records the outcome in `result`. `blockName` is used only
/// for the diagnostic message on failure.
fn decompressThread(result: *DecompressResult, buffer: []u8, compressed: []const u8, blockName: []const u8) void {
    const written = zstd.ZSTD_decompress(buffer.ptr, buffer.len, compressed.ptr, compressed.len);
    const failed = zstd.ZSTD_isError(written) != 0;
    if (failed) {
        std.debug.print("Decompression error ({s}): {s} (compressed={d}, buffer={d})\n", .{ blockName, zstd.ZSTD_getErrorName(written), compressed.len, buffer.len });
    }
    result.err = failed;
    result.len = if (failed) 0 else written;
    result.data = buffer;
}
/// Applies a "TRDIFF10" patch (a zstd-based bsdiff variant) to `oldfile` and
/// returns the reconstructed new file. Caller owns the returned slice.
///
/// Patch layout: a 32-byte header (8-byte magic "TRDIFF10" followed by three
/// signed little-endian 64-bit lengths: control block, diff block, and new
/// file size), then three zstd frames: control, diff, and extra blocks.
///
/// Errors: `CorruptPatch`/`PatchCorrupt` on malformed patch data (both names
/// kept for caller compatibility), `InvalidPatchFormat` on invalid zstd
/// frames, `DecompressionFailed` when zstd fails, plus allocator errors.
pub fn applyPatch(allocator: *std.mem.Allocator, oldfile: []const u8, patch: []const u8) ![]u8 {
    // Guard the header read: a shorter buffer would trip the slice bounds check.
    if (patch.len < 32) {
        std.debug.print("Patch error: patch shorter than 32-byte header\n", .{});
        return error.CorruptPatch;
    }
    const header = patch[0..32];
    // Check for appropriate magic
    if (std.mem.eql(u8, header[0..8], "TRDIFF10") == false) {
        std.debug.print("corrupt patch {s}\n", .{header[0..8].*});
        return error.CorruptPatch;
    }
    // Read lengths from header
    const controlLen = offtin(header[8..16]);
    const diffLen = offtin(header[16..24]);
    const newSize = offtin(header[24..32]);
    if (controlLen < 0 or diffLen < 0 or newSize < 0) {
        std.debug.print("Patch error: Negative length in header\n", .{});
        return error.CorruptPatch;
    }
    const controlStart: usize = 32;
    const diffStart: usize = controlStart + @as(usize, @intCast(controlLen));
    const extraStart: usize = diffStart + @as(usize, @intCast(diffLen));
    // Validate that offsets don't exceed patch size
    if (diffStart > patch.len or extraStart > patch.len) {
        std.debug.print("Patch error: Header lengths exceed patch file size\n", .{});
        return error.CorruptPatch;
    }
    const newSizeUsize: usize = @intCast(newSize);
    // Pre-allocate the output buffer (no ArrayList resizing); freed on any
    // error path below so failed patches do not leak.
    const newfile = try allocator.alloc(u8, newSizeUsize);
    errdefer allocator.free(newfile);
    const controlCompressed = patch[controlStart..diffStart];
    const diffCompressed = patch[diffStart..extraStart];
    const extraCompressed = patch[extraStart..];
    // Prefer the exact decompressed sizes recorded in the zstd frame headers.
    const controlDecompSize = zstd.ZSTD_getFrameContentSize(controlCompressed.ptr, controlCompressed.len);
    const diffDecompSize = zstd.ZSTD_getFrameContentSize(diffCompressed.ptr, diffCompressed.len);
    const extraDecompSize = zstd.ZSTD_getFrameContentSize(extraCompressed.ptr, extraCompressed.len);
    // Sentinel values returned by ZSTD_getFrameContentSize (see zstd.h).
    const ZSTD_CONTENTSIZE_UNKNOWN: c_ulonglong = @bitCast(@as(i64, -1));
    const ZSTD_CONTENTSIZE_ERROR: c_ulonglong = @bitCast(@as(i64, -2));
    if (controlDecompSize == ZSTD_CONTENTSIZE_ERROR or diffDecompSize == ZSTD_CONTENTSIZE_ERROR or extraDecompSize == ZSTD_CONTENTSIZE_ERROR) {
        std.debug.print("Error: Invalid zstd frame in patch file\n", .{});
        // Dump the magic bytes of each block to aid debugging.
        if (controlCompressed.len >= 4) {
            std.debug.print(" Control block first 4 bytes: {x:0>2} {x:0>2} {x:0>2} {x:0>2}\n", .{ controlCompressed[0], controlCompressed[1], controlCompressed[2], controlCompressed[3] });
        }
        if (diffCompressed.len >= 4) {
            std.debug.print(" Diff block first 4 bytes: {x:0>2} {x:0>2} {x:0>2} {x:0>2}\n", .{ diffCompressed[0], diffCompressed[1], diffCompressed[2], diffCompressed[3] });
        }
        if (extraCompressed.len >= 4) {
            std.debug.print(" Extra block first 4 bytes: {x:0>2} {x:0>2} {x:0>2} {x:0>2}\n", .{ extraCompressed[0], extraCompressed[1], extraCompressed[2], extraCompressed[3] });
        }
        return error.InvalidPatchFormat;
    }
    // If a frame omits its content size, fall back to generous upper bounds.
    // Note: the diff block can exceed newSize when diff regions overlap, so
    // use oldfile.len + newSizeUsize as its theoretical maximum.
    const controlBufSize: usize = if (controlDecompSize == ZSTD_CONTENTSIZE_UNKNOWN) newSizeUsize * 2 else @intCast(controlDecompSize);
    const diffBufSize: usize = if (diffDecompSize == ZSTD_CONTENTSIZE_UNKNOWN) oldfile.len + newSizeUsize else @intCast(diffDecompSize);
    const extraBufSize: usize = if (extraDecompSize == ZSTD_CONTENTSIZE_UNKNOWN) newSizeUsize else @intCast(extraDecompSize);
    // Scratch buffers for decompression; released before returning (fixes a leak).
    const controlBlockBuffer = try allocator.alloc(u8, controlBufSize);
    defer allocator.free(controlBlockBuffer);
    const diffBlockBuffer = try allocator.alloc(u8, diffBufSize);
    defer allocator.free(diffBlockBuffer);
    const extraBlockBuffer = try allocator.alloc(u8, extraBufSize);
    defer allocator.free(extraBlockBuffer);
    // Decompress all three blocks in parallel; results are read after join().
    var controlResult = DecompressResult{ .data = undefined, .len = 0, .err = false };
    var diffResult = DecompressResult{ .data = undefined, .len = 0, .err = false };
    var extraResult = DecompressResult{ .data = undefined, .len = 0, .err = false };
    const controlThread = try std.Thread.spawn(.{}, decompressThread, .{
        &controlResult,
        controlBlockBuffer,
        controlCompressed,
        "Control",
    });
    const diffThread = try std.Thread.spawn(.{}, decompressThread, .{
        &diffResult,
        diffBlockBuffer,
        diffCompressed,
        "Diff",
    });
    const extraThread = try std.Thread.spawn(.{}, decompressThread, .{
        &extraResult,
        extraBlockBuffer,
        extraCompressed,
        "Extra",
    });
    // Wait for all decompressions to complete
    controlThread.join();
    diffThread.join();
    extraThread.join();
    if (controlResult.err or diffResult.err or extraResult.err) {
        std.debug.print("Decompression error\n", .{});
        return error.DecompressionFailed;
    }
    const controlBlock = controlBlockBuffer[0..controlResult.len];
    const diffBlock = diffBlockBuffer[0..diffResult.len];
    const extraBlock = extraBlockBuffer[0..extraResult.len];
    var controlpos: usize = 0;
    var diffpos: usize = 0;
    var extrapos: usize = 0;
    var oldpos: usize = 0;
    var newpos: usize = 0;
    var entryNum: usize = 0;
    // Main patching loop: each control entry is (diff length, extra length, seek).
    while (controlpos < controlBlock.len) {
        entryNum += 1;
        // A partial trailing entry would previously panic on the slice read.
        if (controlpos + 24 > controlBlock.len) {
            std.debug.print("Patch error: truncated control entry {d}\n", .{entryNum});
            return error.PatchCorrupt;
        }
        const diffCount = offtin(controlBlock[controlpos .. controlpos + 8]);
        const extraCount = offtin(controlBlock[controlpos + 8 .. controlpos + 16]);
        const seekBy: i64 = offtin(controlBlock[controlpos + 16 .. controlpos + 24]);
        controlpos += 24;
        // Negative counts would previously trap in @intCast; reject them instead.
        if (diffCount < 0 or extraCount < 0) {
            std.debug.print("Patch error: negative count in control entry {d}\n", .{entryNum});
            return error.PatchCorrupt;
        }
        const readDiffBy: usize = @intCast(diffCount);
        const readExtraBy: usize = @intCast(extraCount);
        // Validate bounds before writing
        if (newpos + readDiffBy > newfile.len) {
            std.debug.print("Patch error: entry {d} would write past end of file (newpos={d} + diffBy={d} > size={d})\n", .{ entryNum, newpos, readDiffBy, newfile.len });
            std.debug.print(" readExtraBy={d}, seekBy={d}, controlpos={d}/{d}\n", .{ readExtraBy, seekBy, controlpos, controlResult.len });
            return error.PatchCorrupt;
        }
        if (diffpos + readDiffBy > diffBlock.len) {
            std.debug.print("Patch error: control data reads past end of diff block\n", .{});
            return error.PatchCorrupt;
        }
        // Reading past the old file would previously trip a slice bounds panic.
        if (oldpos + readDiffBy > oldfile.len) {
            std.debug.print("Patch error: entry {d} reads past end of old file\n", .{entryNum});
            return error.PatchCorrupt;
        }
        // Apply diff block: newfile[newpos..] = oldfile[oldpos..] + diffBlock[diffpos..]
        const diffSlice = diffBlock[diffpos .. diffpos + readDiffBy];
        const oldSlice = oldfile[oldpos .. oldpos + readDiffBy];
        const newSlice = newfile[newpos .. newpos + readDiffBy];
        // SIMD-accelerated byte-wise wrapping add, with a scalar tail loop.
        var i: usize = 0;
        while (i + vectorSize <= readDiffBy) {
            const oldVec: @Vector(vectorSize, u8) = oldSlice[i..][0..vectorSize].*;
            const diffVec: @Vector(vectorSize, u8) = diffSlice[i..][0..vectorSize].*;
            newSlice[i..][0..vectorSize].* = @addWithOverflow(oldVec, diffVec)[0];
            i += vectorSize;
        }
        // Handle remaining bytes
        while (i < readDiffBy) {
            newSlice[i] = @addWithOverflow(oldSlice[i], diffSlice[i])[0];
            i += 1;
        }
        diffpos += readDiffBy;
        newpos += readDiffBy;
        // Copy extra block directly (no arithmetic needed)
        if (readExtraBy > 0) {
            // Both the destination and the extra block must have room.
            if (newpos + readExtraBy > newfile.len or extrapos + readExtraBy > extraBlock.len) {
                std.debug.print("Patch error: entry {d} extra copy out of bounds\n", .{entryNum});
                return error.PatchCorrupt;
            }
            @memcpy(newfile[newpos .. newpos + readExtraBy], extraBlock[extrapos .. extrapos + readExtraBy]);
            extrapos += readExtraBy;
            newpos += readExtraBy;
        }
        // Advance the old-file cursor; seekBy may be negative, but the cursor
        // itself must not go below zero (would previously trap in @intCast).
        const nextOldPos = @as(i64, @intCast(oldpos + readDiffBy)) + seekBy;
        if (nextOldPos < 0) {
            std.debug.print("Patch error: entry {d} seeks before start of old file\n", .{entryNum});
            return error.PatchCorrupt;
        }
        oldpos = @intCast(nextOldPos);
    }
    const newSizeMB = @as(f64, @floatFromInt(newfile.len)) / (1024.0 * 1024.0);
    std.debug.print("Completed - New file: {d:.2} MB\n", .{newSizeMB});
    return newfile;
}
// offtin reads an int64 (little endian).
// Sign-magnitude encoding: the top bit of byte 7 is the sign flag and the
// remaining 63 bits hold the magnitude, least-significant byte first.
fn offtin(buf: []const u8) i64 {
    // Start from the high byte with the sign bit masked off, then fold in
    // the remaining bytes from most- to least-significant.
    var magnitude: i64 = @intCast(buf[7] & 0x7f);
    var index: usize = 7;
    while (index > 0) {
        index -= 1;
        magnitude = (magnitude << 8) | @as(i64, buf[index]);
    }
    return if ((buf[7] & 0x80) != 0) -magnitude else magnitude;
}
/// Periodic progress logger intended to run on its own thread: every 10
/// seconds prints how many of `total` bytes have been processed, until
/// `running.*` is cleared by the owner.
fn logProgressBytes(running: *bool, percent: *f32, bytes: *usize, total: usize, operation: []const u8) void {
    const bytes_per_mb: f64 = 1024.0 * 1024.0;
    while (running.*) {
        // Wait 10s between messages; re-check the flag so nothing is logged
        // after the owner has signalled shutdown.
        std.time.sleep(std.time.ns_per_s * 10);
        if (!running.*) break;
        const done_mb = @as(f64, @floatFromInt(bytes.*)) / bytes_per_mb;
        const total_mb = @as(f64, @floatFromInt(total)) / bytes_per_mb;
        std.debug.print("{s}... {d:.1}/{d:.1} MB ({d:.1}%)\n", .{ operation, done_mb, total_mb, percent.* });
    }
}