Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
74 changes: 66 additions & 8 deletions src/bun.js/api/bun/h2_frame_parser.zig
Original file line number Diff line number Diff line change
Expand Up @@ -2352,6 +2352,18 @@ pub const H2FrameParser = struct {
return data.len;
};

// Stream was refused in handleReceivedStreamID because the concurrent
// stream limit was exceeded. RST_STREAM(REFUSED_STREAM) has already been
// sent. Consume the payload so currentFrame/remainingLength stay in sync,
// but do not decode headers or notify JS for this stream.
if (stream.state == .CLOSED and stream.rstCode == @intFromEnum(ErrorCode.REFUSED_STREAM)) {
if (handleIncommingPayload(this, data, frame.streamIdentifier)) |content| {
this.readBuffer.reset();
return content.end;
}
return data.len;
}

const settings = this.remoteSettings orelse this.localSettings;
if (frame.length > settings.maxFrameSize) {
this.sendGoAway(frame.streamIdentifier, ErrorCode.FRAME_SIZE_ERROR, "invalid Headers frame size", this.lastStreamID, true);
Expand Down Expand Up @@ -2517,7 +2529,7 @@ pub const H2FrameParser = struct {
}

/// We need to be very carefull because this is not a stable ptr
fn handleReceivedStreamID(this: *H2FrameParser, streamIdentifier: u32) ?*Stream {
fn handleReceivedStreamID(this: *H2FrameParser, streamIdentifier: u32, frameType: u8) ?*Stream {
// connection stream
if (streamIdentifier == 0) {
return null;
Expand All @@ -2528,6 +2540,52 @@ pub const H2FrameParser = struct {
return entry.value_ptr;
}

// RFC 9113 Section 5.1.2: an endpoint that receives a HEADERS frame that
// causes its advertised concurrent stream limit to be exceeded MUST treat
// this as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM. We use
// REFUSED_STREAM so the client may retry safely. Only HEADERS frames open
// new streams, so the check is gated on frame type to avoid refusing
// PRIORITY/WINDOW_UPDATE/etc. on idle streams.
if (this.isServer and
frameType == @intFromEnum(FrameType.HTTP_FRAME_HEADERS) and
this.localSettings.maxConcurrentStreams != std.math.maxInt(u32))
{
var open: u32 = 0;
var it = this.streams.valueIterator();
while (it.next()) |s| {
if (s.state != .CLOSED) open += 1;
}
if (open >= this.localSettings.maxConcurrentStreams) {
if (streamIdentifier > this.lastStreamID) this.lastStreamID = streamIdentifier;
var buffer: [FrameHeader.byteSize + 4]u8 = undefined;
@memset(&buffer, 0);
var writerStream = std.io.fixedBufferStream(&buffer);
const writer = writerStream.writer();
var frame: FrameHeader = .{ .type = @intFromEnum(FrameType.HTTP_FRAME_RST_STREAM), .flags = 0, .streamIdentifier = streamIdentifier, .length = 4 };
_ = frame.write(@TypeOf(writer), writer);
var value: u32 = @intFromEnum(ErrorCode.REFUSED_STREAM);
value = @byteSwap(value);
_ = writer.write(std.mem.asBytes(&value)) catch 0;
_ = this.write(&buffer);

// Record the stream as CLOSED so subsequent frames for it are
// handled against an existing entry instead of re-triggering this
// path or being treated as the connection stream. Returning the
// pointer (not null) preserves the invariant that a null result
// means "stream 0", so callers do not emit GOAWAY(PROTOCOL_ERROR).
const entry = bun.handleOom(this.streams.getOrPut(streamIdentifier));
entry.value_ptr.* = Stream.init(
streamIdentifier,
this.localSettings.initialWindowSize,
if (this.remoteSettings) |s| s.initialWindowSize else DEFAULT_WINDOW_SIZE,
this.paddingStrategy,
);
entry.value_ptr.state = .CLOSED;
entry.value_ptr.rstCode = @intFromEnum(ErrorCode.REFUSED_STREAM);
return entry.value_ptr;
}
Comment on lines +2543 to +2586
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🔴 The new null return path in handleReceivedStreamID is treated as a connection-level error by every caller: handleHeadersFrame, handleContinuationFrame, handleDataFrame, and handlePriorityFrame all have an orelse sendGoAway(PROTOCOL_ERROR) branch that fires on null, so a refused stream causes RST_STREAM(REFUSED_STREAM) followed immediately by GOAWAY(PROTOCOL_ERROR), which kills the entire connection and makes safe client retry impossible—directly contradicting the stated RFC 9113 intent. Additionally, the concurrent-stream limit check fires for all incoming frame types (DATA, PRIORITY, etc.), not just HEADERS; only HEADERS frames open new streams, so sending REFUSED_STREAM for a PRIORITY frame on an idle stream (valid per RFC 9113 §5.3.1) is semantically wrong and also triggers the GOAWAY path.

Extended reasoning...

Root cause — broken null invariant

The pre-existing invariant in this file is: handleReceivedStreamID returns null only when the frame targets stream 0 (the connection control stream), which is always a connection-level protocol error. Every frame handler relies on this: handleHeadersFrame (line 2350), handleContinuationFrame (line 2309), handleDataFrame (line 1994), and handlePriorityFrame (line 2271) all contain an orelse { this.sendGoAway(frame.streamIdentifier, ErrorCode.PROTOCOL_ERROR, ..., true); return data.len; } branch. The PR introduces a second null-return path—refused streams—without updating any of these callers.

Concrete HEADERS path (bug_001)

  1. Client sends HEADERS for stream N when open >= maxConcurrentStreams.
  2. readByteshandleReceivedStreamID(N): check fires, writes RST_STREAM(REFUSED_STREAM), returns null.
  3. readByteshandleHeadersFrame(header, bytes, null).
  4. handleHeadersFrame line 2350: var stream = stream_ orelse { this.sendGoAway(frame.streamIdentifier, ErrorCode.PROTOCOL_ERROR, "Headers frame on connection stream", this.lastStreamID, true); return data.len; };
  5. sendGoAway with emitError=true dispatches .onError and .onEnd, terminating the entire HTTP/2 connection.

Net result: the server sends both RST_STREAM(REFUSED_STREAM) and GOAWAY(PROTOCOL_ERROR). The GOAWAY kills the connection for all in-flight streams, defeating the stated goal of allowing the client to retry safely. The same path fires through handleContinuationFrame for multi-frame HEADERS sequences.

Non-HEADERS frame types (bug_002)

handleReceivedStreamID is invoked at lines 2607, 2655, and 2689 for all frame types before the per-type dispatch. The new check fires whenever a frame arrives with an unknown stream ID while the server is at capacity, regardless of frame type. RFC 9113 §5.1.2 restricts the concurrent-stream limit to HEADERS frames (stream creation). Specifically:

  • A PRIORITY frame on an idle stream is explicitly valid per RFC 9113 §5.3.1. The server incorrectly sends RST_STREAM(REFUSED_STREAM), then handlePriorityFrame (line 2271) receives null and fires sendGoAway(PROTOCOL_ERROR), terminating the connection for RFC-valid client behavior.
  • DATA on a non-existent stream is a protocol violation, but the correct response is STREAM_CLOSED, not REFUSED_STREAM (which semantically means "retry is safe").
  • WINDOW_UPDATE on a non-existent stream avoids GOAWAY because its handler uses if (stream) |s|, but still incorrectly emits RST_STREAM(REFUSED_STREAM).

Why the bundled test may pass despite the bug

The nghttp2 client library processes RST_STREAM before GOAWAY if both arrive in the same TCP segment. The second request's aborted event fires from the RST_STREAM, and the countdown.dec() in the close handler may decrement to zero before the GOAWAY-induced connection teardown prevents the first request from completing. This makes the test timing-sensitive rather than a definitive correctness proof.

Fix

Move the concurrent-stream limit check inside handleHeadersFrame (or pass the frame type into handleReceivedStreamID and gate the check on FrameType.HTTP_FRAME_HEADERS). When the limit is exceeded, send RST_STREAM(REFUSED_STREAM) and return normally (do not return null), so no GOAWAY is triggered. This preserves the existing null-means-connection-error invariant and restricts the refused-stream logic to HEADERS frames only, as RFC 9113 §5.1.2 requires.

}
Comment on lines +2543 to +2587
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Don't use null here to mean "refused stream".

Line 2553 collides with the existing null sentinel used by the frame handlers: readBytes() passes this result straight into handleHeadersFrame(), whose null branch is the connection-stream error path, so an over-limit request will send RST_STREAM(REFUSED_STREAM) and then still emit GOAWAY(PROTOCOL_ERROR). That fast-fail also skips payload consumption, leaving currentFrame/remainingLength stale. Because this helper runs before frame-type dispatch, the new branch can also refuse fresh non-HEADERS frames. Please move the limit enforcement into the HEADERS path and represent refusal with a distinct state/sentinel instead of null.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/bun.js/api/bun/h2_frame_parser.zig` around lines 2531 - 2555, The current
pre-dispatch concurrent-stream check in readBytes()/h2_frame_parser.zig uses
null to indicate a REFUSED_STREAM, which collides with the existing null
sentinel handled by handleHeadersFrame() (causing GOAWAY/PROTOCOL_ERROR and
skipping payload consumption); move the maxConcurrentStreams enforcement out of
the pre-dispatch path into the specific HEADERS handling branch and/or return a
distinct sentinel instead of null (e.g., a tagged enum or a specific error
variant like RefusedStream) so callers like readBytes() and handleHeadersFrame()
can differentiate "refused stream" from the existing null/connection-error path;
update uses of lastStreamID, the code that writes the RST_STREAM(REFUSED_STREAM)
(the FrameHeader creation and write sequence), and any code that relies on
currentFrame/remainingLength to ensure payload is consumed or skipped correctly
when a refusal is issued.


if (streamIdentifier > this.lastStreamID) {
this.lastStreamID = streamIdentifier;
}
Expand Down Expand Up @@ -2578,7 +2636,7 @@ pub const H2FrameParser = struct {
if (this.currentFrame) |header| {
log("current frame {s} {} {} {} {}", .{ if (this.isServer) "server" else "client", header.type, header.length, header.flags, header.streamIdentifier });

const stream = this.handleReceivedStreamID(header.streamIdentifier);
const stream = this.handleReceivedStreamID(header.streamIdentifier, header.type);
return switch (header.type) {
@intFromEnum(FrameType.HTTP_FRAME_SETTINGS) => this.handleSettingsFrame(header, bytes),
@intFromEnum(FrameType.HTTP_FRAME_WINDOW_UPDATE) => this.handleWindowUpdateFrame(header, bytes, stream),
Expand Down Expand Up @@ -2626,7 +2684,7 @@ pub const H2FrameParser = struct {
this.currentFrame = header;
this.remainingLength = header.length;
log("new frame {} {} {} {}", .{ header.type, header.length, header.flags, header.streamIdentifier });
const stream = this.handleReceivedStreamID(header.streamIdentifier);
const stream = this.handleReceivedStreamID(header.streamIdentifier, header.type);

return switch (header.type) {
@intFromEnum(FrameType.HTTP_FRAME_SETTINGS) => this.handleSettingsFrame(header, bytes[needed..]) + needed,
Expand Down Expand Up @@ -2660,7 +2718,7 @@ pub const H2FrameParser = struct {
log("new frame {s} {} {} {} {}", .{ if (this.isServer) "server" else "client", header.type, header.length, header.flags, header.streamIdentifier });
this.currentFrame = header;
this.remainingLength = header.length;
const stream = this.handleReceivedStreamID(header.streamIdentifier);
const stream = this.handleReceivedStreamID(header.streamIdentifier, header.type);
return switch (header.type) {
@intFromEnum(FrameType.HTTP_FRAME_SETTINGS) => this.handleSettingsFrame(header, bytes[FrameHeader.byteSize..]) + FrameHeader.byteSize,
@intFromEnum(FrameType.HTTP_FRAME_WINDOW_UPDATE) => this.handleWindowUpdateFrame(header, bytes[FrameHeader.byteSize..], stream) + FrameHeader.byteSize,
Expand Down Expand Up @@ -3883,7 +3941,7 @@ pub const H2FrameParser = struct {
if (id > MAX_STREAM_ID) {
return jsc.JSValue.jsNumber(-1);
}
_ = this.handleReceivedStreamID(id) orelse {
_ = this.handleReceivedStreamID(id, std.math.maxInt(u8)) orelse {
return jsc.JSValue.jsNumber(-1);
};

Expand Down Expand Up @@ -4139,7 +4197,7 @@ pub const H2FrameParser = struct {
if (err == error.OutOfMemory) {
return globalObject.throw("Failed to allocate header buffer", .{});
}
const stream = this.handleReceivedStreamID(stream_id) orelse {
const stream = this.handleReceivedStreamID(stream_id, std.math.maxInt(u8)) orelse {
return jsc.JSValue.jsNumber(-1);
};
if (!stream_ctx_arg.isEmptyOrUndefinedOrNull() and stream_ctx_arg.isObject()) {
Expand Down Expand Up @@ -4176,7 +4234,7 @@ pub const H2FrameParser = struct {
if (err == error.OutOfMemory) {
return globalObject.throw("Failed to allocate header buffer", .{});
}
const stream = this.handleReceivedStreamID(stream_id) orelse {
const stream = this.handleReceivedStreamID(stream_id, std.math.maxInt(u8)) orelse {
return jsc.JSValue.jsNumber(-1);
};
stream.state = .CLOSED;
Expand All @@ -4193,7 +4251,7 @@ pub const H2FrameParser = struct {
const encoded_data = encoded_headers.items;
const encoded_size = encoded_data.len;

const stream = this.handleReceivedStreamID(stream_id) orelse {
const stream = this.handleReceivedStreamID(stream_id, std.math.maxInt(u8)) orelse {
return jsc.JSValue.jsNumber(-1);
};
if (!stream_ctx_arg.isEmptyOrUndefinedOrNull() and stream_ctx_arg.isObject()) {
Expand Down
56 changes: 56 additions & 0 deletions test/js/node/test/parallel/test-http2-max-concurrent-streams.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
'use strict';

const common = require('../common');
if (!common.hasCrypto)
common.skip('missing crypto');
const assert = require('assert');
const h2 = require('http2');
const Countdown = require('../common/countdown');

// Only allow one stream to be open at a time
const server = h2.createServer({ settings: { maxConcurrentStreams: 1 } });

// The stream handler must be called only once
server.on('stream', common.mustCall((stream) => {
stream.respond();
stream.end('hello world');
}));
Comment on lines +14 to +17
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Make this regression deterministic and prove the session stays alive.

The first stream is ended immediately, so the second request only exercises the limit if timing happens to keep the first one open long enough. Then Lines 22-25 shut both sides down as soon as the two streams close, so this would still pass if the implementation sent REFUSED_STREAM and then GOAWAYed the session. Please hold the first stream open until the second request is refused, then verify a follow-up request still succeeds before cleanup.

Also applies to: 22-25, 31-55

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@test/js/node/test/parallel/test-http2-max-concurrent-streams.js` around lines
14 - 17, The current test ends the first HTTP/2 stream immediately (inside
server.on('stream') where stream.respond() and stream.end('hello world') are
called), making the max-concurrent-streams behavior timing-dependent; change the
server stream handler to keep the first stream open (do not call stream.end) and
only close it after the second client request has been observed and explicitly
rejected (e.g., detect REFUSED_STREAM or error on the second request produced by
clientSession.request()); after asserting the second request is refused, send a
follow-up successful request on the same session and assert it succeeds, then
close the first stream and perform the existing session cleanup—use the existing
identifiers server, stream, clientSession/request handlers to locate the code to
modify.


server.listen(0, common.mustCall(() => {
const client = h2.connect(`http://localhost:${server.address().port}`);

const countdown = new Countdown(2, () => {
server.close();
client.close();
});

client.on('remoteSettings', common.mustCall((settings) => {
assert.strictEqual(settings.maxConcurrentStreams, 1);
}));

// This one should go through with no problems
{
const req = client.request({ ':method': 'POST' });
req.on('aborted', common.mustNotCall());
req.on('response', common.mustCall());
req.resume();
req.on('end', common.mustCall());
req.on('close', common.mustCall(() => countdown.dec()));
req.end();
}

{
// This one should be aborted
const req = client.request({ ':method': 'POST' });
req.on('aborted', common.mustCall());
req.on('response', common.mustNotCall());
req.resume();
req.on('end', common.mustNotCall());
req.on('close', common.mustCall(() => countdown.dec()));
req.on('error', common.expectsError({
code: 'ERR_HTTP2_STREAM_ERROR',
name: 'Error',
message: 'Stream closed with error code NGHTTP2_REFUSED_STREAM'
}));
}
}));
Loading