From eaa129b09739aba7736f1b992f643df65e2924cc Mon Sep 17 00:00:00 2001 From: Stackie Jia Date: Tue, 5 May 2026 22:09:36 +0800 Subject: [PATCH 1/2] fix(status): measure bandwidth at post-send, not at enqueue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Per-client bandwidth on the status page was tracking the rate at which upstream data was queued into the client send buffer, not the rate at which the kernel actually delivered it. For slow clients, this stayed pinned at the upstream supply rate until the queue hit HWM and backpressure kicked in — overstating real client throughput. Move the byte counter from the three upstream-receive sites in stream_handle_fd_event() to inside connection_handle_write()'s zerocopy_send() loop. The counter now advances only when the kernel TCP stack accepts bytes, so the 1s tick in stream_tick() reports the real client receive rate even before the queue fills. Side benefit: total_bytes_sent_cumulative no longer over-counts bytes that were enqueued but lost on disconnect. Co-Authored-By: Claude Opus 4.7 --- src/connection.c | 2 ++ src/stream.c | 9 --------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/src/connection.c b/src/connection.c index 8ff5938..4b7e7ea 100644 --- a/src/connection.c +++ b/src/connection.c @@ -625,6 +625,8 @@ connection_write_status_t connection_handle_write(connection_t *c) { size_t bytes_sent = 0; int ret = zerocopy_send(c->fd, &c->zc_queue, &bytes_sent); total_sent += bytes_sent; + /* Count post-send so per-client bandwidth reflects actual receive rate, not enqueue rate. */
+ c->stream.total_bytes_sent += (uint64_t)bytes_sent; if (ret < 0 && ret != -2) { c->state = CONN_CLOSING; diff --git a/src/stream.c b/src/stream.c index d8c81d0..933fb68 100644 --- a/src/stream.c +++ b/src/stream.c @@ -107,9 +107,6 @@ int stream_handle_fd_event(stream_context_t *ctx, int fd, uint32_t events, int64 } return -1; } - if (result > 0) { - ctx->total_bytes_sent += (uint64_t)result; - } return 0; } @@ -119,9 +116,6 @@ int stream_handle_fd_event(stream_context_t *ctx, int fd, uint32_t events, int64 if (result < 0) { return -1; /* Error */ } - if (result > 0) { - ctx->total_bytes_sent += (uint64_t)result; - } return 0; /* Success - processed data, continue with other events */ } @@ -143,9 +137,6 @@ int stream_handle_fd_event(stream_context_t *ctx, int fd, uint32_t events, int64 logger(LOG_ERROR, "HTTP Proxy: Socket event handling failed"); return -1; } - if (result > 0) { - ctx->total_bytes_sent += (uint64_t)result; - } return 0; } From 7f591b9f3ae8300479aca779c373cc857437d1bc Mon Sep 17 00:00:00 2001 From: Stackie Jia Date: Tue, 5 May 2026 22:17:43 +0800 Subject: [PATCH 2/2] fix(status): remove leftover enqueue-time counters in fcc/multicast MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous commit only removed enqueue-time `total_bytes_sent` increments from stream.c, missing four equivalent sites in fcc_handle_unicast_media(), fcc_handle_mcast_active() (×2), and mcast_session_handle_event(). For multicast/FCC streams, bytes were being counted at both enqueue (these sites) and post-send (the new connection.c site), so the displayed bandwidth was roughly doubled (reported ~17 Mbit/s for an 8 Mbit/s stream). Drop the four leftover increments so post-send is the sole counter. The local `flushed_bytes` accumulator in fcc_handle_mcast_active() is preserved — it still drives the diagnostic "Flushed pending buffer chain" log line.
Co-Authored-By: Claude Opus 4.7 --- src/fcc.c | 11 ++--------- src/multicast.c | 5 +---- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/src/fcc.c b/src/fcc.c index e18a755..ac7ab04 100644 --- a/src/fcc.c +++ b/src/fcc.c @@ -454,10 +454,7 @@ int fcc_handle_unicast_media(stream_context_t *ctx, buffer_ref_t *buf_ref) { } /* Forward RTP payload to client (with reordering) */ - int processed_bytes = stream_process_rtp_payload(ctx, buf_ref); - if (processed_bytes > 0) { - ctx->total_bytes_sent += (uint64_t)processed_bytes; - } + stream_process_rtp_payload(ctx, buf_ref); /* Check if we should terminate FCC based on reorder's delivered sequence. * base_seq - 1 is the last sequence number successfully delivered. @@ -562,7 +559,6 @@ int fcc_handle_mcast_active(stream_context_t *ctx, buffer_ref_t *buf_ref) { buffer_ref_t *next = node->send_next; int processed_bytes = stream_process_rtp_payload(ctx, node); if (likely(processed_bytes > 0)) { - ctx->total_bytes_sent += (uint64_t)processed_bytes; flushed_bytes += (uint64_t)processed_bytes; } buffer_ref_put(node); @@ -578,10 +574,7 @@ int fcc_handle_mcast_active(stream_context_t *ctx, buffer_ref_t *buf_ref) { /* Forward multicast data to client (true zero-copy) or capture I-frame * (snapshot) */ - int processed_bytes = stream_process_rtp_payload(ctx, buf_ref); - if (likely(processed_bytes > 0)) { - ctx->total_bytes_sent += (uint64_t)processed_bytes; - } + stream_process_rtp_payload(ctx, buf_ref); return 0; } diff --git a/src/multicast.c b/src/multicast.c index 0eb04ca..ffe291f 100644 --- a/src/multicast.c +++ b/src/multicast.c @@ -535,10 +535,7 @@ int mcast_session_handle_event(mcast_session_t *session, stream_context_t *ctx, /* Handle based on FCC state (if FCC initialized) */ if (!ctx->fcc.initialized) { /* Direct multicast without FCC - forward to client */ - int processed_bytes = stream_process_rtp_payload(ctx, recv_buf); - if (processed_bytes > 0) { - ctx->total_bytes_sent += (uint64_t)processed_bytes; - } + stream_process_rtp_payload(ctx, recv_buf);
buffer_ref_put(recv_buf); continue; /* Read next packet */ }