Fields:

state: State = .start

header_bytes_buffer: []u8
    A fixed buffer of len `max_header_bytes`.
    Pointers into this buffer are not stable until after a message is complete.

header_bytes_len: u32

next_chunk_length: u64

done: bool
    `false`: headers. `true`: trailers.
Error sets:

pub const CheckCompleteHeadError = error{HttpHeadersOversize};
pub const ReadError = error{
    HttpChunkInvalid,
};

Functions:

pub fn init(buf: []u8) HeadersParser
    Initializes the parser with a provided buffer `buf`.
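A minimal sketch of constructing a parser over a caller-owned buffer. The 8 KiB size and the `std.http.protocol` namespace are assumptions for illustration, not values prescribed by the API:

const std = @import("std");
// Assumption: this struct is exposed as std.http.protocol.HeadersParser.
const HeadersParser = std.http.protocol.HeadersParser;

test "init with a caller-owned header buffer" {
    var head_buf: [8192]u8 = undefined; // arbitrary illustrative size
    var parser = HeadersParser.init(&head_buf);
    // The parser starts at the beginning of the head section.
    try std.testing.expect(parser.state == .start);
}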
pub fn reset(hp: *HeadersParser) void
    Reinitialize the parser. Asserts the parser is in the "done" state.
pub fn get(hp: HeadersParser) []u8
    Returns a slice of the header bytes accumulated so far.
pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32
    Feeds `bytes` to a std.http.HeadParser seeded with the current state, advances the
    parser state, and returns the number of bytes consumed.
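An illustrative sketch of scanning for the end of a response head; the expected count assumes HeadParser stops consuming once it has seen the terminating blank line:

const std = @import("std");
const HeadersParser = std.http.protocol.HeadersParser; // assumed namespace

test "findHeadersEnd stops after the blank line" {
    var head_buf: [256]u8 = undefined;
    var parser = HeadersParser.init(&head_buf);
    const input = "HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello";
    const n = parser.findHeadersEnd(input);
    // The trailing "hello" body bytes are not consumed.
    try std.testing.expectEqual(@as(u32, input.len - "hello".len), n);
    try std.testing.expect(parser.state == .finished);
}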
pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32
    Feeds `bytes` to a std.http.ChunkParser seeded with the current state and
    `next_chunk_length`, updates both, and returns the number of bytes consumed.
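A sketch of parsing one chunk-size line. Putting the parser into `.chunk_head_size` by hand stands in for the caller switching to chunked framing after the head; the exact expectations are illustrative. Chunk sizes are hexadecimal, so "400" means 0x400 (1024) bytes:

const std = @import("std");
const HeadersParser = std.http.protocol.HeadersParser; // assumed namespace

test "findChunkedLen reads a hex chunk-size line" {
    var head_buf: [256]u8 = undefined;
    var parser = HeadersParser.init(&head_buf);
    parser.state = .chunk_head_size; // caller saw Transfer-Encoding: chunked
    const n = parser.findChunkedLen("400\r\n");
    try std.testing.expectEqual(@as(u32, 5), n);
    try std.testing.expectEqual(@as(u64, 0x400), parser.next_chunk_length);
    try std.testing.expect(parser.state == .chunk_data);
}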
pub fn isComplete(r: *HeadersParser) bool
    Returns whether or not the parser has finished parsing a complete message. A message
    is only complete after the entire body has been read and any trailing headers have
    been parsed.
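A sketch of reusing one parser for consecutive messages; setting `done` by hand stands in for the caller's end-of-body bookkeeping:

const std = @import("std");
const HeadersParser = std.http.protocol.HeadersParser; // assumed namespace

test "isComplete and reset between messages" {
    var head_buf: [256]u8 = undefined;
    var parser = HeadersParser.init(&head_buf);
    _ = parser.findHeadersEnd("HTTP/1.1 204 No Content\r\n\r\n");
    parser.done = true; // a 204 response has no body to read
    try std.testing.expect(parser.isComplete());
    parser.reset(); // asserts `done`; keeps the same buffer for the next message
    try std.testing.expect(!parser.isComplete());
}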
pub fn checkCompleteHead(hp: *HeadersParser, in: []const u8) CheckCompleteHeadError!u32
    Pushes `in` into the parser. Returns the number of bytes consumed by the header.
    Any header bytes are appended to `header_bytes_buffer`.
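A sketch of feeding the head incrementally, as bytes might arrive from a socket; the split point of the two slices is arbitrary:

const std = @import("std");
const HeadersParser = std.http.protocol.HeadersParser; // assumed namespace

test "checkCompleteHead accumulates the head across reads" {
    var head_buf: [256]u8 = undefined;
    var parser = HeadersParser.init(&head_buf);
    const packets = [_][]const u8{ "HTTP/1.1 200 OK\r\nConte", "nt-Length: 0\r\n\r\n" };
    for (packets) |packet| {
        _ = try parser.checkCompleteHead(packet);
    }
    try std.testing.expect(parser.state == .finished);
    // get() returns the accumulated head, including the terminating CRLF CRLF.
    try std.testing.expectEqualStrings("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", parser.get());
}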
pub fn read(r: *HeadersParser, conn: anytype, buffer: []u8, skip: bool) !usize
    Reads the body of the message into `buffer`. Returns the number of bytes placed in
    the buffer.
    If `skip` is true, the buffer will be unused and the body will be skipped.
    See `std.http.Client.Connection` for an example of `conn`.
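`conn` only needs the members that `read` calls: `fill`, `peek`, `drop`, and `read`. The type below is a toy in-memory stand-in for illustration, not the real `std.http.Client.Connection`; the pre-set `state` and `next_chunk_length` mimic a head that announced `Content-Length: 5`:

const std = @import("std");
const HeadersParser = std.http.protocol.HeadersParser; // assumed namespace

// Toy connection over an in-memory slice, providing only what read() uses.
const SliceConn = struct {
    data: []const u8,

    fn fill(c: *SliceConn) error{ EndOfStream, ReadFailed }!void {
        if (c.data.len == 0) return error.EndOfStream;
    }

    fn peek(c: *SliceConn) []const u8 {
        return c.data;
    }

    fn drop(c: *SliceConn, num: u16) void {
        c.data = c.data[num..];
    }

    fn read(c: *SliceConn, buffer: []u8) error{ReadFailed}!usize {
        const n = @min(buffer.len, c.data.len);
        @memcpy(buffer[0..n], c.data[0..n]);
        c.data = c.data[n..];
        return n;
    }
};

test "read drains a fixed-length body through a toy connection" {
    var head_buf: [256]u8 = undefined;
    var parser = HeadersParser.init(&head_buf);
    parser.state = .finished; // head already parsed
    parser.next_chunk_length = 5; // from Content-Length: 5

    var conn = SliceConn{ .data = "hello" };
    var body: [16]u8 = undefined;
    const n = try parser.read(&conn, &body, false);
    try std.testing.expectEqualStrings("hello", body[0..n]);
    try std.testing.expect(parser.done);
}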
Source:

pub const HeadersParser = struct {
state: State = .start,
/// A fixed buffer of len `max_header_bytes`.
/// Pointers into this buffer are not stable until after a message is complete.
header_bytes_buffer: []u8,
header_bytes_len: u32,
next_chunk_length: u64,
/// `false`: headers. `true`: trailers.
done: bool,
/// Initializes the parser with a provided buffer `buf`.
pub fn init(buf: []u8) HeadersParser {
return .{
.header_bytes_buffer = buf,
.header_bytes_len = 0,
.done = false,
.next_chunk_length = 0,
};
}
/// Reinitialize the parser.
/// Asserts the parser is in the "done" state.
pub fn reset(hp: *HeadersParser) void {
assert(hp.done);
hp.* = .{
.state = .start,
.header_bytes_buffer = hp.header_bytes_buffer,
.header_bytes_len = 0,
.done = false,
.next_chunk_length = 0,
};
}
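/// Returns a slice of the header bytes accumulated so far.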
pub fn get(hp: HeadersParser) []u8 {
return hp.header_bytes_buffer[0..hp.header_bytes_len];
}
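/// Feeds `bytes` to a std.http.HeadParser seeded with the current state, advances
/// the parser state, and returns the number of bytes consumed.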
pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
var hp: std.http.HeadParser = .{
.state = switch (r.state) {
.start => .start,
.seen_n => .seen_n,
.seen_r => .seen_r,
.seen_rn => .seen_rn,
.seen_rnr => .seen_rnr,
.finished => .finished,
else => unreachable,
},
};
const result = hp.feed(bytes);
r.state = switch (hp.state) {
.start => .start,
.seen_n => .seen_n,
.seen_r => .seen_r,
.seen_rn => .seen_rn,
.seen_rnr => .seen_rnr,
.finished => .finished,
};
return @intCast(result);
}
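/// Feeds `bytes` to a std.http.ChunkParser seeded with the current state and
/// `next_chunk_length`, updates both, and returns the number of bytes consumed.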
pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
var cp: std.http.ChunkParser = .{
.state = switch (r.state) {
.chunk_head_size => .head_size,
.chunk_head_ext => .head_ext,
.chunk_head_r => .head_r,
.chunk_data => .data,
.chunk_data_suffix => .data_suffix,
.chunk_data_suffix_r => .data_suffix_r,
.invalid => .invalid,
else => unreachable,
},
.chunk_len = r.next_chunk_length,
};
const result = cp.feed(bytes);
r.state = switch (cp.state) {
.head_size => .chunk_head_size,
.head_ext => .chunk_head_ext,
.head_r => .chunk_head_r,
.data => .chunk_data,
.data_suffix => .chunk_data_suffix,
.data_suffix_r => .chunk_data_suffix_r,
.invalid => .invalid,
};
r.next_chunk_length = cp.chunk_len;
return @intCast(result);
}
/// Returns whether or not the parser has finished parsing a complete
/// message. A message is only complete after the entire body has been read
/// and any trailing headers have been parsed.
pub fn isComplete(r: *HeadersParser) bool {
return r.done and r.state == .finished;
}
pub const CheckCompleteHeadError = error{HttpHeadersOversize};
/// Pushes `in` into the parser. Returns the number of bytes consumed by
/// the header. Any header bytes are appended to `header_bytes_buffer`.
pub fn checkCompleteHead(hp: *HeadersParser, in: []const u8) CheckCompleteHeadError!u32 {
if (hp.state.isContent()) return 0;
const i = hp.findHeadersEnd(in);
const data = in[0..i];
if (hp.header_bytes_len + data.len > hp.header_bytes_buffer.len)
return error.HttpHeadersOversize;
@memcpy(hp.header_bytes_buffer[hp.header_bytes_len..][0..data.len], data);
hp.header_bytes_len += @intCast(data.len);
return i;
}
pub const ReadError = error{
HttpChunkInvalid,
};
/// Reads the body of the message into `buffer`. Returns the number of
/// bytes placed in the buffer.
///
/// If `skip` is true, the buffer will be unused and the body will be skipped.
///
/// See `std.http.Client.Connection` for an example of `conn`.
pub fn read(r: *HeadersParser, conn: anytype, buffer: []u8, skip: bool) !usize {
assert(r.state.isContent());
if (r.done) return 0;
var out_index: usize = 0;
while (true) {
switch (r.state) {
.invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => unreachable,
.finished => {
const data_avail = r.next_chunk_length;
if (skip) {
conn.fill() catch |err| switch (err) {
error.EndOfStream => {
r.done = true;
return 0;
},
else => |e| return e,
};
const nread = @min(conn.peek().len, data_avail);
conn.drop(@intCast(nread));
r.next_chunk_length -= nread;
if (r.next_chunk_length == 0 or nread == 0) r.done = true;
return out_index;
} else if (out_index < buffer.len) {
const out_avail = buffer.len - out_index;
const can_read = @as(usize, @intCast(@min(data_avail, out_avail)));
const nread = try conn.read(buffer[0..can_read]);
r.next_chunk_length -= nread;
if (r.next_chunk_length == 0 or nread == 0) r.done = true;
return nread;
} else {
return out_index;
}
},
.chunk_data_suffix, .chunk_data_suffix_r, .chunk_head_size, .chunk_head_ext, .chunk_head_r => {
conn.fill() catch |err| switch (err) {
error.EndOfStream => {
r.done = true;
return 0;
},
else => |e| return e,
};
const i = r.findChunkedLen(conn.peek());
conn.drop(@intCast(i));
switch (r.state) {
.invalid => return error.HttpChunkInvalid,
.chunk_data => if (r.next_chunk_length == 0) {
if (std.mem.eql(u8, conn.peek(), "\r\n")) {
r.state = .finished;
conn.drop(2);
} else {
// The trailer section is formatted identically
// to the header section.
r.state = .seen_rn;
}
r.done = true;
return out_index;
},
else => return out_index,
}
continue;
},
.chunk_data => {
const data_avail = r.next_chunk_length;
const out_avail = buffer.len - out_index;
if (skip) {
conn.fill() catch |err| switch (err) {
error.EndOfStream => {
r.done = true;
return 0;
},
else => |e| return e,
};
const nread = @min(conn.peek().len, data_avail);
conn.drop(@intCast(nread));
r.next_chunk_length -= nread;
} else if (out_avail > 0) {
const can_read: usize = @intCast(@min(data_avail, out_avail));
const nread = try conn.read(buffer[out_index..][0..can_read]);
r.next_chunk_length -= nread;
out_index += nread;
}
if (r.next_chunk_length == 0) {
r.state = .chunk_data_suffix;
continue;
}
return out_index;
},
}
}
}
}