send_buffer: []u8

send_buffer_start: usize
Index of the first byte in send_buffer. This is 0 unless a short write happens in write.

send_buffer_end: usize
Index of the last byte + 1 in send_buffer.

transfer_encoding: TransferEncoding
Selects how the body is framed: none means the end of the connection signals the end of the stream, content_length counts down to zero as bytes are written (a debugging utility), and chunked wraps each chunk in a header and trailer. See TransferEncoding below.

elide_body: bool
When true, all data after the headers is omitted; the headers themselves are still sent.

chunk_len: usize
Indicates how much of the end of the send_buffer corresponds to a chunk. This amount of data will be wrapped by an HTTP chunk header.
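The relationship between these fields can be sketched with two illustrative helpers (not part of the struct; the names are made up for this example): the buffered-but-unsent bytes are send_buffer[send_buffer_start..send_buffer_end], and for chunked responses the last chunk_len of those bytes form the chunk currently being accumulated.

// Illustrative only; these helpers are not part of std.
fn buffered(r: *const Response) []const u8 {
    // Bytes accepted by `write` but not yet handed to the stream.
    return r.send_buffer[r.send_buffer_start..r.send_buffer_end];
}

fn pendingChunk(r: *const Response) []const u8 {
    // Tail of the buffered bytes that the next flush will wrap in an HTTP
    // chunk header; only meaningful for transfer-encoding: chunked.
    return r.send_buffer[r.send_buffer_end - r.chunk_len .. r.send_buffer_end];
}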
WriteError is an alias of net.Stream.WriteError; its expansion is shown below. Via UnexpectedError it also includes error.Unexpected: the Operating System returned an undocumented error code. That error is in theory not possible, but it is better to handle it than to invoke undefined behavior; when it is observed, it usually means the Zig Standard Library needs a small patch to add the error code to the error set for the respective function. (anyerror means the error set is known only at runtime.)
pub const WriteError = error{
DiskQuota,
FileTooBig,
InputOutput,
NoSpaceLeft,
DeviceBusy,
InvalidArgument,
/// File descriptor does not hold the required rights to write to it.
AccessDenied,
BrokenPipe,
SystemResources,
OperationAborted,
NotOpenForWriting,
/// The process cannot access the file because another process has locked
/// a portion of the file. Windows-only.
LockViolation,
/// This error occurs when no global event loop is configured,
/// and reading from the file descriptor would block.
WouldBlock,
/// Connection reset by peer.
ConnectionResetByPeer,
/// This error occurs in Linux if the process being written to
/// no longer exists.
ProcessNotFound,
/// This error occurs when a device gets disconnected before or mid-flush
/// while it's being written to - errno(6): No such device or address.
NoDevice,
} || UnexpectedError;
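Callers typically treat BrokenPipe and ConnectionResetByPeer as the client going away rather than as server-side failures, for example when finishing a response with end (documented below). A minimal sketch of that pattern (endIgnoringDisconnect is a hypothetical helper, not part of std):

const std = @import("std");

/// Hypothetical helper: finish a response while treating a vanished client
/// as uninteresting rather than as a failure worth propagating.
fn endIgnoringDisconnect(response: *std.http.Server.Response) std.http.Server.Response.WriteError!void {
    response.end() catch |err| switch (err) {
        error.BrokenPipe, error.ConnectionResetByPeer => {},
        else => |e| return e,
    };
}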
pub fn end(r: *Response) WriteError!void

When using content-length, asserts that the amount of data sent matches the value sent in the header, then calls flush. Otherwise, transfer-encoding: chunked is being used, and it writes the end-of-stream message, then flushes the stream to the system. Respects the value of elide_body to omit all data after the headers.
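For context, a Response is normally obtained from std.http.Server.Request.respondStreaming rather than constructed by hand. A minimal content-length flow, sketched against the Zig 0.13-era std.http.Server API (serveHello, the buffer size, and the body are illustrative):

const std = @import("std");

fn serveHello(request: *std.http.Server.Request) !void {
    const body = "hello, world\n";
    var send_buffer: [4000]u8 = undefined;
    var response = request.respondStreaming(.{
        .send_buffer = &send_buffer,
        // Supplying a length selects the .content_length encoding;
        // omitting it selects transfer-encoding: chunked.
        .content_length = body.len,
        .respond_options = .{ .status = .ok },
    });
    try response.writeAll(body);
    // Asserts that exactly content_length bytes were written, then flushes.
    try response.end();
}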
pub fn endChunked(r: *Response, options: EndChunkedOptions) WriteError!void

Asserts that the Response is using transfer-encoding: chunked. Writes the end-of-stream message and any optional trailers, then flushes the stream to the system. Respects the value of elide_body to omit all data after the headers. Asserts there are at most 25 trailers.
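Under the same assumptions, a chunked response finished with a trailer might look like the following sketch (the x-checksum trailer and its value are placeholders; trailers are conventionally announced up front in a trailer header):

const std = @import("std");

fn serveChunked(request: *std.http.Server.Request) !void {
    var send_buffer: [4000]u8 = undefined;
    var response = request.respondStreaming(.{
        .send_buffer = &send_buffer,
        // No content_length, so transfer-encoding: chunked is used.
        .respond_options = .{
            .extra_headers = &.{.{ .name = "trailer", .value = "x-checksum" }},
        },
    });
    try response.writeAll("first chunk of data\n");
    try response.writeAll("second chunk of data\n");
    try response.endChunked(.{
        .trailers = &.{.{ .name = "x-checksum", .value = "placeholder" }},
    });
}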
pub fn write(r: *Response, bytes: []const u8) WriteError!usize

If using content-length, asserts that writing these bytes to the client would not exceed the content-length value sent in the HTTP header. May return 0, which does not indicate end of stream. The caller decides when the end of stream occurs by calling end.
pub fn writeAll(r: *Response, bytes: []const u8) WriteError!void

If using content-length, asserts that writing these bytes to the client would not exceed the content-length value sent in the HTTP header. Loops over write until all of bytes has been consumed.
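As a sketch of this contract in practice, the loop below copies a file into the response body; writeAll internally loops over write, which may accept only part of each slice (streamFile is illustrative, not part of std):

const std = @import("std");

fn streamFile(response: *std.http.Server.Response, file: std.fs.File) !void {
    var buf: [4096]u8 = undefined;
    while (true) {
        const n = try file.read(&buf);
        if (n == 0) break; // end of file
        try response.writeAll(buf[0..n]);
    }
    try response.end();
}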
pub fn flush(r: *Response) WriteError!void

Sends all buffered data to the client. This is redundant after calling end. Respects the value of elide_body to omit all data after the headers.
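flush is what enables incremental delivery: bytes handed to write sit in send_buffer until a flush (or end) pushes them to the client. A sketch of a progress stream that flushes after each line (names and sizes are illustrative; chunked encoding is used because the total length is unknown):

const std = @import("std");

fn streamProgress(request: *std.http.Server.Request) !void {
    var send_buffer: [1024]u8 = undefined;
    var response = request.respondStreaming(.{ .send_buffer = &send_buffer });
    var step: usize = 1;
    while (step <= 3) : (step += 1) {
        var line_buf: [64]u8 = undefined;
        const line = try std.fmt.bufPrint(&line_buf, "step {d} done\n", .{step});
        try response.writeAll(line);
        // Make the buffered chunk visible to the client now.
        try response.flush();
    }
    try response.end();
}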
pub const Response = struct {
stream: net.Stream,
send_buffer: []u8,
/// Index of the first byte in `send_buffer`.
/// This is 0 unless a short write happens in `write`.
send_buffer_start: usize,
/// Index of the last byte + 1 in `send_buffer`.
send_buffer_end: usize,
/// See `TransferEncoding`.
transfer_encoding: TransferEncoding,
elide_body: bool,
/// Indicates how much of the end of the `send_buffer` corresponds to a
/// chunk. This amount of data will be wrapped by an HTTP chunk header.
chunk_len: usize,
pub const TransferEncoding = union(enum) {
/// End of connection signals the end of the stream.
none,
/// As a debugging utility, counts down to zero as bytes are written.
content_length: u64,
/// Each chunk is wrapped in a header and trailer.
chunked,
};
pub const WriteError = net.Stream.WriteError;
/// When using content-length, asserts that the amount of data sent matches
/// the value sent in the header, then calls `flush`.
/// Otherwise, transfer-encoding: chunked is being used, and it writes the
/// end-of-stream message, then flushes the stream to the system.
/// Respects the value of `elide_body` to omit all data after the headers.
pub fn end(r: *Response) WriteError!void {
switch (r.transfer_encoding) {
.content_length => |len| {
assert(len == 0); // Trips when end() called before all bytes written.
try flush_cl(r);
},
.none => {
try flush_cl(r);
},
.chunked => {
try flush_chunked(r, &.{});
},
}
r.* = undefined;
}
pub const EndChunkedOptions = struct {
trailers: []const http.Header = &.{},
};
/// Asserts that the Response is using transfer-encoding: chunked.
/// Writes the end-of-stream message and any optional trailers, then
/// flushes the stream to the system.
/// Respects the value of `elide_body` to omit all data after the headers.
/// Asserts there are at most 25 trailers.
pub fn endChunked(r: *Response, options: EndChunkedOptions) WriteError!void {
assert(r.transfer_encoding == .chunked);
try flush_chunked(r, options.trailers);
r.* = undefined;
}
/// If using content-length, asserts that writing these bytes to the client
/// would not exceed the content-length value sent in the HTTP header.
/// May return 0, which does not indicate end of stream. The caller decides
/// when the end of stream occurs by calling `end`.
pub fn write(r: *Response, bytes: []const u8) WriteError!usize {
switch (r.transfer_encoding) {
.content_length, .none => return write_cl(r, bytes),
.chunked => return write_chunked(r, bytes),
}
}
fn write_cl(context: *const anyopaque, bytes: []const u8) WriteError!usize {
const r: *Response = @constCast(@alignCast(@ptrCast(context)));
var trash: u64 = std.math.maxInt(u64);
const len = switch (r.transfer_encoding) {
.content_length => |*len| len,
else => &trash,
};
if (r.elide_body) {
len.* -= bytes.len;
return bytes.len;
}
if (bytes.len + r.send_buffer_end > r.send_buffer.len) {
const send_buffer_len = r.send_buffer_end - r.send_buffer_start;
var iovecs: [2]std.posix.iovec_const = .{
.{
.base = r.send_buffer.ptr + r.send_buffer_start,
.len = send_buffer_len,
},
.{
.base = bytes.ptr,
.len = bytes.len,
},
};
const n = try r.stream.writev(&iovecs);
if (n >= send_buffer_len) {
// It was enough to reset the buffer.
r.send_buffer_start = 0;
r.send_buffer_end = 0;
const bytes_n = n - send_buffer_len;
len.* -= bytes_n;
return bytes_n;
}
// It didn't even make it through the existing buffer, let
// alone the new bytes provided.
r.send_buffer_start += n;
return 0;
}
// All bytes can be stored in the remaining space of the buffer.
@memcpy(r.send_buffer[r.send_buffer_end..][0..bytes.len], bytes);
r.send_buffer_end += bytes.len;
len.* -= bytes.len;
return bytes.len;
}
fn write_chunked(context: *const anyopaque, bytes: []const u8) WriteError!usize {
const r: *Response = @constCast(@alignCast(@ptrCast(context)));
assert(r.transfer_encoding == .chunked);
if (r.elide_body)
return bytes.len;
if (bytes.len + r.send_buffer_end > r.send_buffer.len) {
const send_buffer_len = r.send_buffer_end - r.send_buffer_start;
const chunk_len = r.chunk_len + bytes.len;
var header_buf: [18]u8 = undefined;
const chunk_header = std.fmt.bufPrint(&header_buf, "{x}\r\n", .{chunk_len}) catch unreachable;
var iovecs: [5]std.posix.iovec_const = .{
.{
.base = r.send_buffer.ptr + r.send_buffer_start,
.len = send_buffer_len - r.chunk_len,
},
.{
.base = chunk_header.ptr,
.len = chunk_header.len,
},
.{
.base = r.send_buffer.ptr + r.send_buffer_end - r.chunk_len,
.len = r.chunk_len,
},
.{
.base = bytes.ptr,
.len = bytes.len,
},
.{
.base = "\r\n",
.len = 2,
},
};
// TODO make this writev instead of writevAll, which involves
// complicating the logic of this function.
try r.stream.writevAll(&iovecs);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
r.chunk_len = 0;
return bytes.len;
}
// All bytes can be stored in the remaining space of the buffer.
@memcpy(r.send_buffer[r.send_buffer_end..][0..bytes.len], bytes);
r.send_buffer_end += bytes.len;
r.chunk_len += bytes.len;
return bytes.len;
}
/// If using content-length, asserts that writing these bytes to the client
/// would not exceed the content-length value sent in the HTTP header.
pub fn writeAll(r: *Response, bytes: []const u8) WriteError!void {
var index: usize = 0;
while (index < bytes.len) {
index += try write(r, bytes[index..]);
}
}
/// Sends all buffered data to the client.
/// This is redundant after calling `end`.
/// Respects the value of `elide_body` to omit all data after the headers.
pub fn flush(r: *Response) WriteError!void {
switch (r.transfer_encoding) {
.none, .content_length => return flush_cl(r),
.chunked => return flush_chunked(r, null),
}
}
fn flush_cl(r: *Response) WriteError!void {
try r.stream.writeAll(r.send_buffer[r.send_buffer_start..r.send_buffer_end]);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
}
fn flush_chunked(r: *Response, end_trailers: ?[]const http.Header) WriteError!void {
const max_trailers = 25;
if (end_trailers) |trailers| assert(trailers.len <= max_trailers);
assert(r.transfer_encoding == .chunked);
const http_headers = r.send_buffer[r.send_buffer_start .. r.send_buffer_end - r.chunk_len];
if (r.elide_body) {
try r.stream.writeAll(http_headers);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
r.chunk_len = 0;
return;
}
var header_buf: [18]u8 = undefined;
const chunk_header = std.fmt.bufPrint(&header_buf, "{x}\r\n", .{r.chunk_len}) catch unreachable;
var iovecs: [max_trailers * 4 + 5]std.posix.iovec_const = undefined;
var iovecs_len: usize = 0;
iovecs[iovecs_len] = .{
.base = http_headers.ptr,
.len = http_headers.len,
};
iovecs_len += 1;
if (r.chunk_len > 0) {
iovecs[iovecs_len] = .{
.base = chunk_header.ptr,
.len = chunk_header.len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.base = r.send_buffer.ptr + r.send_buffer_end - r.chunk_len,
.len = r.chunk_len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.base = "\r\n",
.len = 2,
};
iovecs_len += 1;
}
if (end_trailers) |trailers| {
iovecs[iovecs_len] = .{
.base = "0\r\n",
.len = 3,
};
iovecs_len += 1;
for (trailers) |trailer| {
iovecs[iovecs_len] = .{
.base = trailer.name.ptr,
.len = trailer.name.len,
};
iovecs_len += 1;
iovecs[iovecs_len] = .{
.base = ": ",
.len = 2,
};
iovecs_len += 1;
if (trailer.value.len != 0) {
iovecs[iovecs_len] = .{
.base = trailer.value.ptr,
.len = trailer.value.len,
};
iovecs_len += 1;
}
iovecs[iovecs_len] = .{
.base = "\r\n",
.len = 2,
};
iovecs_len += 1;
}
iovecs[iovecs_len] = .{
.base = "\r\n",
.len = 2,
};
iovecs_len += 1;
}
try r.stream.writevAll(iovecs[0..iovecs_len]);
r.send_buffer_start = 0;
r.send_buffer_end = 0;
r.chunk_len = 0;
}
pub fn writer(r: *Response) std.io.AnyWriter {
return .{
.writeFn = switch (r.transfer_encoding) {
.none, .content_length => write_cl,
.chunked => write_chunked,
},
.context = r,
};
}
}
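Because writer adapts the Response to std.io.AnyWriter, formatted output and other generic writer-based code can target it directly. A brief illustrative sketch (writeSummary is not part of std):

const std = @import("std");

fn writeSummary(response: *std.http.Server.Response, item_count: usize) !void {
    const w = response.writer();
    try w.print("{d} items processed\n", .{item_count});
    try w.writeAll("done\n");
    try response.end();
}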