1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
|
const std = @import("std");
const MetaInfo = @import("metainfo.zig");
const bencode = @import("bencode.zig");
const AnyWriter = @import("anywriter.zig");
const peerproto = @import("peer_protocol.zig");
const trackproto = @import("tracker_protocol.zig");
/// Proof-of-concept single-file BitTorrent download: decode a hard-coded
/// .torrent file, announce to its tracker over HTTP, connect to the first
/// peer returned, and download every piece sequentially from that one peer,
/// verifying each piece's SHA-1 against the metainfo before writing it out.
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
// deinit() reports leaks; the result is intentionally discarded here.
defer _ = gpa.deinit();
const a = gpa.allocator();
// TODO figure this out. It's not that important, I think, unless
// other clients have special handling for different patterns.
// Spec looks like a bit of a free-for-all here.
var peer_id: [20]u8 = undefined;
@memcpy(&peer_id, "00112233445566778899");
// NOTE(review): torrent path is hard-coded and relative to the CWD — fine
// for a PoC, but this only works when run from the project root.
const f = try std.fs.cwd().openFile("src/sample.torrent", .{});
defer f.close();
var fr = f.reader();
// Bencode-decode the whole torrent file, then lift it into the typed
// MetaInfo view. Both own allocations and are freed on scope exit.
var mib = try bencode.bdecode(a, fr);
defer mib.deinit(a);
var mi = try MetaInfo.parse(a, mib);
defer mi.deinit(a);
// SHA-1 of the bencoded info dict; identifies the torrent to tracker and peers.
const info_hash = try mi.info.hash(a);
var c = std.http.Client{
.allocator = a,
};
defer c.deinit();
// Build the announce URL. files[0].length assumes a single-file torrent —
// the rest of this function shares that assumption.
const url = try trackproto.trackerRequestUrl(a, info_hash, peer_id, mi.info.files[0].length, mi.announce);
defer a.free(url);
// NOTE(review): this fetch(allocator, options) shape matches an older
// std.http.Client API; verify against the targeted Zig version.
var res = try c.fetch(a, .{ .location = .{ .url = url } });
defer res.deinit();
if (res.status != .ok) {
return error.TrackerHttpError;
}
// Tracker response body is bencoded; .? assumes the tracker sent a body.
var trb = try bencode.bdecodeBuf(a, res.body.?);
defer trb.deinit(a);
var tr = try trackproto.TrackerResp.parse(a, trb);
defer tr.deinit(a);
if (tr.peers.len == 0) {
std.log.info("no peers", .{});
return;
}
for (tr.peers) |peer| {
std.log.info("peer: {}", .{peer});
}
// Handle peers, PoC we're just going to handle 1 peer and download everything from them very simplistically.
const p = tr.peers[0];
const file = mi.info.files[0];
var ps = try std.net.tcpConnectToAddress(p);
defer ps.close();
var pw = ps.writer();
var pr = ps.reader();
// BitTorrent wire protocol: exchange handshakes carrying info_hash + peer_id.
var hs: peerproto.Handshake = .{
.info_hash = info_hash,
.peer_id = peer_id,
};
try hs.write(pw);
var phs = try peerproto.Handshake.read(pr);
std.log.info("peer at {} peer_id {s}", .{ p, std.fmt.fmtSliceHexLower(&phs.peer_id) });
// The peer's first message is its bitfield of available pieces; we assume the
// peer has everything (seeder) rather than checking it.
var bf = try peerproto.readMessage(a, pr, peerproto.Bitfield);
_ = bf; // ignore it for now.
// Declare interest, then block until the peer unchokes us so requests are allowed.
try peerproto.Interested.write(pw);
_ = try peerproto.readMessage(a, pr, peerproto.Unchoke);
var of = try std.fs.cwd().createFile(file.name, .{});
defer of.close();
// Declared after `defer of.close()`, so on an error path the truncate runs
// before the close (defers unwind LIFO).
errdefer {
// try to truncate the now-bad file...
of.setEndPos(0) catch {};
}
// Read the piece into memory, we'll check the hash before it goes to disk...
var piece_buf = try a.alloc(u8, mi.info.piece_length);
defer a.free(piece_buf);
for (0..mi.info.pieceCount()) |pi| {
// The final piece is usually shorter than piece_length; clamp to what remains.
const piece_length = @min(mi.info.piece_length, file.length - (pi * mi.info.piece_length));
var s1 = std.crypto.hash.Sha1.init(.{});
// Send a request message for each 16KiB block of the first piece
const blklen: u32 = 16*1024;
var blkcount = try std.math.divCeil(u32, piece_length, blklen);
for (0..blkcount) |i| {
const begin = std.math.cast(u32, i*blklen).?;
// Last block of the piece may be shorter than blklen.
const len = @min(blklen, piece_length - begin);
const req = peerproto.Request{
.index = @intCast(pi),
.begin = begin,
.length = len,
};
std.log.info("Request {any}", .{req});
try req.write(pw);
// Fully synchronous: one request in flight, read its Piece reply immediately.
// NOTE(review): assumes the peer never interleaves other message types here
// (e.g. keep-alive or have) — confirm readMessage handles/skips those.
var piece = try peerproto.readMessage(a, pr, peerproto.Piece);
defer piece.deinit(a);
// The reply must echo our request exactly.
if (piece.index != req.index) return error.ProtocolError;
if (piece.begin != req.begin) return error.ProtocolError;
if (piece.block.len != req.length) return error.ProtocolError;
s1.update(piece.block);
@memcpy(piece_buf[piece.begin..piece.begin+piece.block.len], piece.block);
}
// Compare the streamed SHA-1 with the expected hash from the metainfo;
// only verified pieces reach the output file.
var ah = s1.finalResult();
var ph0 = mi.info.pieceHash(pi).?;
if (std.mem.eql(u8, &ah, &ph0)) {
try of.writeAll(piece_buf[0..piece_length]);
} else {
return error.BadHash;
}
}
std.log.info("fin", .{});
}
// Reference the imported modules so `zig test` discovers and runs the
// `test` blocks declared inside them.
// NOTE(review): trackproto and AnyWriter are imported above but not
// referenced here, so any tests they declare will not run — confirm
// whether that omission is intentional.
test {
_ = bencode;
_ = MetaInfo;
_ = peerproto;
}
|