const std = @import("std");

// ZIP archive reader (listing and extraction only).
// See spec.txt.

const ZipFile = struct {
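    // Compression method ids assigned by the ZIP spec; extract() only implements
    // store and deflate.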
    const CompressionMethod = enum(u16) {
        store = 0,
        deflate = 8,
        lzma = 14,
        zstd = 93,
        xz = 95,
        // Non-exhaustive so archives using any other method id can still be
        // parsed; extract() then rejects them with CompressionMethodNotSupported.
        _,
    };
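    // Overall layout of an archive (see spec.txt):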
    // [local file header 1]
    // [encryption header 1]
    // [file data 1]
    // [data descriptor 1]
    // .
    // .
    // .
    // [local file header n]
    // [encryption header n]
    // [file data n]
    // [data descriptor n]
    // [archive decryption header]
    // [archive extra data record]
    // [central directory header 1]
    // .
    // .
    // .
    // [central directory header n]
    // [zip64 end of central directory record]
    // [zip64 end of central directory locator]
    // [end of central directory record]
    allocator: std.mem.Allocator,
    is_zip_64: bool = false,
    end_of_central_directory_record: EndOfCentralDirectoryRecord,
    central_directory_headers: []CentralDirectoryHeader,
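    /// Scans the tail of `file_or_stream` for the end of central directory
    /// record, then reads every central directory header into memory.
    /// Multi-disk (spanned) archives are rejected.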
    fn from(allocator: std.mem.Allocator, file_or_stream: anytype) !ZipFile {
        // Find the EndOfCentralDirectoryRecord. Its fixed part is 22 bytes and its
        // comment is at most 65,535 bytes, so it starts within the last 65,557
        // bytes of the file.
        const eocdr_search_width_max: usize = 22 + 65_535;
        const epos = try file_or_stream.getEndPos();
        const eocdr_search_width: usize = @min(epos, eocdr_search_width_max);
        const eocdr_seek_start: usize = epos - eocdr_search_width;
        try file_or_stream.seekTo(eocdr_seek_start);
        var reader = file_or_stream.reader();
        // The window holds the last four bytes read in stream order (big-endian),
        // so the needle is the signature with its bytes swapped.
        const needle = @byteSwap(EndOfCentralDirectoryRecord.SIG);
        var window: u32 = try reader.readIntBig(u32);
        while (window != needle) {
            // Running out of bytes before a match means the record isn't there.
            const nb = reader.readByte() catch return error.EndOfCentralDirectoryRecordNotFound;
            window <<= 8;
            window |= nb;
        }
        // The reader is now positioned just past the signature; back up so the
        // record can be read from its start.
        try file_or_stream.seekBy(-4);
        var eocdr = try EndOfCentralDirectoryRecord.read(allocator, file_or_stream);
        errdefer eocdr.deinit(allocator);
        if (eocdr.disk_number_this != 0 or eocdr.disk_number_central_dir_start != 0) return error.SpansNotSupported;
        if (eocdr.total_central_dir_entries != eocdr.total_central_dir_entries_on_this_disk) return error.SpansNotSupported;

        var central_directory_headers = try allocator.alloc(CentralDirectoryHeader, eocdr.total_central_dir_entries);
        errdefer allocator.free(central_directory_headers);
        try file_or_stream.seekTo(eocdr.central_dir_offset);
        for (0..eocdr.total_central_dir_entries) |i| {
            central_directory_headers[i] = try CentralDirectoryHeader.read(allocator, file_or_stream);
        }

        return ZipFile{
            .allocator = allocator,
            .end_of_central_directory_record = eocdr,
            .central_directory_headers = central_directory_headers,
        };
    }
    fn deinit(self: *ZipFile) void {
        self.end_of_central_directory_record.deinit(self.allocator);
        for (0..self.central_directory_headers.len) |i| {
            self.central_directory_headers[i].deinit(self.allocator);
        }
        self.allocator.free(self.central_directory_headers);
    }

    fn count_files(self: ZipFile) u16 {
        return self.end_of_central_directory_record.total_central_dir_entries;
    }
    fn file_name(self: ZipFile, index: u16) []const u8 {
        return self.central_directory_headers[index].file_name;
    }
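    /// Extracts entry `index`: seeks to its local file header in
    /// `stream_or_file_in`, decompresses the file data into `stream_or_file_out`,
    /// and verifies the uncompressed size and CRC-32. Encrypted entries are
    /// rejected.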
    fn extract(self: ZipFile, index: u16, stream_or_file_in: anytype, stream_or_file_out: anytype) !void {
        const cdh = self.central_directory_headers[index];
        try stream_or_file_in.seekTo(cdh.relative_offset_of_local_header);
        var lfh = try LocalFileHeader.read(self.allocator, stream_or_file_in);
        defer lfh.deinit(self.allocator);
        const is_encrypted = lfh.general_purpose_bit_flag.isSet(0);
        if (is_encrypted) return error.EncryptionNotSupported;

        var reader = stream_or_file_in.reader();
        var lr = std.io.limitedReader(reader, lfh.compressed_size);
        var limited_reader = lr.reader();
        switch (lfh.compression_method) {
            .store => { // no compression (store)
                var writer = stream_or_file_out.writer();
                try pump(limited_reader, writer, lfh.uncompressed_size, lfh.crc32);
            },
            .deflate => { // deflate
                var decomp = try std.compress.deflate.decompressor(self.allocator, limited_reader, null);
                defer decomp.deinit();
                var decomp_reader = decomp.reader();
                var writer = stream_or_file_out.writer();
                try pump(decomp_reader, writer, lfh.uncompressed_size, lfh.crc32);
            },
            else => {
                std.log.err("compression method {} not supported", .{lfh.compression_method});
                return error.CompressionMethodNotSupported;
            },
        }
    }
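    /// Copies everything from `reader` to `writer` in 1 KiB chunks while
    /// tracking the byte count and CRC-32, then checks both against the values
    /// recorded in the header.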
    fn pump(reader: anytype, writer: anytype, expected_size_written: usize, expected_crc32: u32) !void {
        var buf = [_]u8{0} ** 1024;
        var crc32 = std.hash.Crc32.init();
        var written: usize = 0;
        while (true) {
            const read = try reader.read(&buf);
            if (read == 0) break;
            const write = buf[0..read];
            try writer.writeAll(write);

            crc32.update(write);
            written += read;
        }
        if (written != expected_size_written) return error.WrongUncompressedSize;
        if (crc32.final() != expected_crc32) return error.WrongChecksum;
    }
};

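/// One central directory entry; the fields mirror the on-disk record, with the
/// variable-length tails allocated by read().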
const CentralDirectoryHeader = struct {
    const SIG: u32 = 0x02014b50;
    version_made_by: u16,
    version_needed_to_extract: u16,
    general_purpose_bit_flag: u16,
    compression_method: u16,
    last_mod_file_time: u16,
    last_mod_file_date: u16,
    crc32: u32,
    compressed_size: u32,
    uncompressed_size: u32,
    file_name_length: u16,
    extra_field_length: u16,
    file_comment_length: u16,
    disk_number_start: u16,
    internal_file_attributes: u16,
    external_file_attributes: u32,
    relative_offset_of_local_header: u32,
    file_name: []const u8,
    extra_field: []const u8,
    file_comment: []const u8,

    fn read(allocator: std.mem.Allocator, stream_or_file: anytype) !CentralDirectoryHeader {
        return read2(allocator, stream_or_file, CentralDirectoryHeader, &[_]Dynamic{
            .{ .field_name = "file_name", .length_field_name = "file_name_length" },
            .{ .field_name = "extra_field", .length_field_name = "extra_field_length" },
            .{ .field_name = "file_comment", .length_field_name = "file_comment_length" },
        });
    }

    fn deinit(self: *CentralDirectoryHeader, allocator: std.mem.Allocator) void {
        allocator.free(self.file_name);
        allocator.free(self.extra_field);
        allocator.free(self.file_comment);
    }
};

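/// The record at the very end of the archive that locates the central directory.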
const EndOfCentralDirectoryRecord = struct {
    const SIG: u32 = 0x06054b50;
    disk_number_this: u16,
    disk_number_central_dir_start: u16,
    total_central_dir_entries_on_this_disk: u16,
    total_central_dir_entries: u16,
    size_of_central_dir: u32,
    central_dir_offset: u32,
    comment_length: u16,
    comment: []const u8,

    fn read(allocator: std.mem.Allocator, file_or_stream: anytype) !EndOfCentralDirectoryRecord {
        return read2(allocator, file_or_stream, EndOfCentralDirectoryRecord, &[_]Dynamic{
            .{ .field_name = "comment", .length_field_name = "comment_length" },
        });
    }

    fn deinit(self: *EndOfCentralDirectoryRecord, allocator: std.mem.Allocator) void {
        allocator.free(self.comment);
    }
};

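/// The header stored immediately before each entry's (possibly compressed) data.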
const LocalFileHeader = struct {
    const SIG: u32 = 0x04034b50;
    version_needed_to_extract: u16,
    general_purpose_bit_flag: std.bit_set.IntegerBitSet(16),
    compression_method: ZipFile.CompressionMethod,
    last_mod_file_time: u16,
    last_mod_file_date: u16,
    crc32: u32,
    compressed_size: u32,
    uncompressed_size: u32,
    file_name_length: u16,
    extra_field_length: u16,
    file_name: []const u8,
    extra_field: []const u8,
    fn read(allocator: std.mem.Allocator, stream_or_file: anytype) !LocalFileHeader {
        return read2(allocator, stream_or_file, LocalFileHeader, &[_]Dynamic{
            .{ .field_name = "file_name", .length_field_name = "file_name_length" },
            .{ .field_name = "extra_field", .length_field_name = "extra_field_length" },
        });
    }
    fn deinit(self: *LocalFileHeader, allocator: std.mem.Allocator) void {
        allocator.free(self.file_name);
        allocator.free(self.extra_field);
    }
};

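/// Marks a slice field of a record as variable-length: its length comes from the
/// integer field named `length_field_name`, which must be declared (and therefore
/// read) before the slice itself.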
const Dynamic = struct {
    field_name: []const u8,
    length_field_name: []const u8,
};

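/// Generic record reader: verifies the little-endian signature `T.SIG`, then
/// fills every field of `T` in declaration order. Integer, packed-struct and
/// integer-backed enum fields are read as little-endian; fields listed in
/// `dynamics` are allocated with `allocator` and filled from the stream.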
fn read2(
    allocator: std.mem.Allocator,
    stream_or_file: anytype,
    comptime T: type,
    comptime dynamics: []const Dynamic,
) !T {
    if (!@hasDecl(T, "SIG")) @compileError("Expected decl SIG:u32 on type " ++ @typeName(T));
    const ti = @typeInfo(T);
    if (ti != .Struct) @compileError("read2 expects type parameter T to be a struct, but it was a " ++ @typeName(T));
    const si = ti.Struct;

    var reader = stream_or_file.reader();
    const sig_actual = try reader.readIntLittle(u32);
    if (sig_actual != T.SIG) {
        std.log.err("invalid signature expected {x} got {x}", .{ T.SIG, sig_actual });
        return error.InvalidSignature;
    }
    var t: T = undefined;
    inline for (si.fields) |field| {
        const fti = @typeInfo(field.type);
        dynamic: inline for (dynamics) |dyn| {
            if (comptime std.mem.eql(u8, dyn.field_name, field.name)) {
                if (fti != .Pointer) @compileError("field " ++ field.name ++ " is marked dynamic but isn't a pointer. Instead it's a " ++ @typeName(field.type));
                const pi = fti.Pointer;
                if (pi.size != .Slice) @compileError("field " ++ field.name ++ " is marked dynamic, but isn't a slice, instead it's sized " ++ @tagName(pi.size));
                const len = @field(t, dyn.length_field_name);
                var buf = try allocator.alloc(pi.child, len);
                // TODO how to errdefer in a loop, not sure where the scope ends.
                _ = try reader.readAll(buf);
                @field(t, field.name) = buf;
                break :dynamic;
            }
        } else {
            switch (fti) {
                .Int => {
                    @field(t, field.name) = try reader.readIntLittle(field.type);
                },
                .Struct => |fsi| {
                    if (fsi.backing_integer) |bi| {
                        const int = try reader.readIntLittle(bi);
                        @field(t, field.name) = @bitCast(int);
                    } else @compileError("only packed struct with backing integer are supported, field " ++ field.name ++ " type " ++ @typeName(field.type) ++ "is not such a struct");
                },
                .Enum => |fei| {
                    if (@typeInfo(fei.tag_type) == .Int) {
                        const int = try reader.readIntLittle(fei.tag_type);
                        @field(t, field.name) = @enumFromInt(int);
                    } else @compileError("only enum with integer tag type are supported field " ++ field.name ++ " type " ++ @typeName(field.type) ++ "is not such a struct");
                },
                else => @compileError("don't know  how to handle field " ++ field.name ++ " of type " ++ @tagName(fti)),
            }
        }
    }
    return t;
}

test "foo" {
    const test_zip = @embedFile("hello.zip");
    var fbs = std.io.fixedBufferStream(test_zip);
    var zf = try ZipFile.from(std.testing.allocator, &fbs);
    defer zf.deinit();
    try std.testing.expectEqual(zf.count_files(), 2);
    try std.testing.expectEqualStrings(zf.file_name(0), "hello.txt");
    try std.testing.expectEqualStrings(zf.file_name(1), "foo.txt");
}

test "foo2" {
    const test_zip = try std.fs.cwd().openFile("src/hello.zip", .{});
    var zf = try ZipFile.from(std.testing.allocator, &test_zip);
    defer zf.deinit();
    try std.testing.expectEqual(zf.count_files(), 2);
    try std.testing.expectEqualStrings(zf.file_name(0), "hello.txt");
    try std.testing.expectEqualStrings(zf.file_name(1), "foo.txt");
}

test "extract" {
    const test_zip = @embedFile("hello.zip");
    var fbs = std.io.fixedBufferStream(test_zip);
    var zf = try ZipFile.from(std.testing.allocator, &fbs);
    defer zf.deinit();
    var out = [_]u8{0} ** 1024;
    var fbs_out = std.io.fixedBufferStream(&out);
    try zf.extract(0, &fbs, &fbs_out);
    try std.testing.expectEqualStrings("Hello, Zip!", fbs_out.getWritten());
    fbs_out.reset();
    try zf.extract(1, &fbs, &fbs_out);
    try std.testing.expectEqualStrings("hi there\n", fbs_out.getWritten());
}
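
// A sketch of extracting an entry straight from a file on disk rather than an
// embedded buffer; it reuses the src/hello.zip fixture and expected contents
// from the tests above.
test "extract from file" {
    const test_zip = try std.fs.cwd().openFile("src/hello.zip", .{});
    defer test_zip.close();
    var zf = try ZipFile.from(std.testing.allocator, &test_zip);
    defer zf.deinit();
    var out = [_]u8{0} ** 1024;
    var fbs_out = std.io.fixedBufferStream(&out);
    try zf.extract(0, &test_zip, &fbs_out);
    try std.testing.expectEqualStrings("Hello, Zip!", fbs_out.getWritten());
}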