author     Stefan Eissing <icing@apache.org>    2022-09-27 10:53:51 +0000
committer  Stefan Eissing <icing@apache.org>    2022-09-27 10:53:51 +0000
commit     b88117f6024f8675b848d55d0920230cab97ad46 (patch)
tree       cf57e8a8c29584c5cbb1cb785364aa49ed18be29 /modules
parent     d8e765773e4e054866432fac7905df02c6f2fd3d (diff)
download   httpd-b88117f6024f8675b848d55d0920230cab97ad46.tar.gz
*) mod_http2: type adjustments and castings for int/apr_uint32_t/apr_size_t/apr_off_t.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1904297 13f79535-47bb-0310-9956-ffa450edef68
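
A minimal editorial sketch (not part of the patch) of the clamp-before-cast idiom this commit applies when narrowing a signed apr_off_t byte count to apr_size_t, as in the h2_c2.c hunk below; the helper name clamp_readbytes is hypothetical:

#include <apr.h>

/* Clamp a signed apr_off_t byte count before narrowing it to apr_size_t.
 * Capping at APR_INT32_MAX keeps the result representable both as an
 * unsigned apr_size_t and within the int-sized limits of the HTTP/2 layer. */
static apr_size_t clamp_readbytes(apr_off_t readbytes)
{
    if (readbytes <= 0) {
        return 0;
    }
    return (readbytes < APR_INT32_MAX)? (apr_size_t)readbytes : APR_INT32_MAX;
}
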
Diffstat (limited to 'modules')
-rw-r--r--  modules/http2/h2_c2.c              4
-rw-r--r--  modules/http2/h2_mplx.c           42
-rw-r--r--  modules/http2/h2_mplx.h           18
-rw-r--r--  modules/http2/h2_proxy_session.c  13
-rw-r--r--  modules/http2/h2_proxy_util.c      4
-rw-r--r--  modules/http2/h2_push.c            9
-rw-r--r--  modules/http2/h2_session.c         6
-rw-r--r--  modules/http2/h2_session.h        18
-rw-r--r--  modules/http2/h2_stream.c         15
-rw-r--r--  modules/http2/h2_util.c          188
-rw-r--r--  modules/http2/h2_util.h           21
-rw-r--r--  modules/http2/h2_workers.c         7
-rw-r--r--  modules/http2/h2_workers.h         2
-rw-r--r--  modules/http2/mod_http2.c          4
14 files changed, 118 insertions, 233 deletions
diff --git a/modules/http2/h2_c2.c b/modules/http2/h2_c2.c
index ec5d3a99fd..e70f4c7092 100644
--- a/modules/http2/h2_c2.c
+++ b/modules/http2/h2_c2.c
@@ -166,8 +166,8 @@ static apr_status_t h2_c2_filter_in(ap_filter_t* f,
apr_status_t status = APR_SUCCESS;
apr_bucket *b;
apr_off_t bblen;
- apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
- (apr_size_t)readbytes : APR_SIZE_MAX);
+ apr_size_t rmax = (readbytes < APR_INT32_MAX)?
+ (apr_size_t)readbytes : APR_INT32_MAX;
conn_ctx = h2_conn_ctx_get(f->c);
AP_DEBUG_ASSERT(conn_ctx);
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index ffc17ffb4b..3cbd3aca6c 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -234,7 +234,8 @@ static h2_c2_transit *c2_transit_get(h2_mplx *m)
static void c2_transit_recycle(h2_mplx *m, h2_c2_transit *transit)
{
- if (m->c2_transits->nelts >= m->max_spare_transits) {
+ if (m->c2_transits->nelts >= APR_INT32_MAX ||
+ (apr_uint32_t)m->c2_transits->nelts >= m->max_spare_transits) {
c2_transit_destroy(transit);
}
else {
@@ -307,7 +308,7 @@ h2_mplx *h2_mplx_c1_create(int child_num, apr_uint32_t id, h2_stream *stream0,
m->q = h2_iq_create(m->pool, m->max_streams);
m->workers = workers;
- m->processing_max = H2MIN((int)h2_workers_get_max_workers(workers), m->max_streams);
+ m->processing_max = H2MIN(h2_workers_get_max_workers(workers), m->max_streams);
m->processing_limit = 6; /* the original h1 max parallel connections */
m->last_mood_change = apr_time_now();
m->mood_update_interval = apr_time_from_msec(100);
@@ -338,7 +339,8 @@ h2_mplx *h2_mplx_c1_create(int child_num, apr_uint32_t id, h2_stream *stream0,
sizeof(h2_c2_transit*));
m->producer = h2_workers_register(workers, m->pool,
- apr_psprintf(m->pool, "h2-%d", (int)m->id),
+ apr_psprintf(m->pool, "h2-%u",
+ (unsigned int)m->id),
c2_prod_next, c2_prod_done,
workers_shutdown, m);
return m;
@@ -403,11 +405,11 @@ static int m_report_stream_iter(void *ctx, void *val) {
if (conn_ctx) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */
H2_STRM_MSG(stream, "->03198: %s %s %s"
- "[started=%d/done=%d]"),
+ "[started=%u/done=%u]"),
conn_ctx->request->method, conn_ctx->request->authority,
conn_ctx->request->path,
- (int)apr_atomic_read32(&conn_ctx->started),
- (int)apr_atomic_read32(&conn_ctx->done));
+ apr_atomic_read32(&conn_ctx->started),
+ apr_atomic_read32(&conn_ctx->done));
}
else {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */
@@ -442,7 +444,8 @@ static int m_stream_cancel_iter(void *ctx, void *val) {
void h2_mplx_c1_destroy(h2_mplx *m)
{
apr_status_t status;
- int i, wait_secs = 60, old_aborted;
+ unsigned int i, wait_secs = 60;
+ int old_aborted;
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
H2_MPLX_MSG(m, "start release"));
@@ -461,9 +464,10 @@ void h2_mplx_c1_destroy(h2_mplx *m)
/* How to shut down a h2 connection:
* 1. cancel all streams still active */
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
- H2_MPLX_MSG(m, "release, %d/%d/%d streams (total/hold/purge), %d streams"),
- (int)h2_ihash_count(m->streams),
- (int)h2_ihash_count(m->shold), m->spurge->nelts, m->processing_count);
+ H2_MPLX_MSG(m, "release, %u/%u/%d streams (total/hold/purge), %d streams"),
+ h2_ihash_count(m->streams),
+ h2_ihash_count(m->shold),
+ m->spurge->nelts, m->processing_count);
while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) {
/* until empty */
}
@@ -487,8 +491,8 @@ void h2_mplx_c1_destroy(h2_mplx *m)
/* This can happen if we have very long running requests
* that do not time out on IO. */
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, APLOGNO(03198)
- H2_MPLX_MSG(m, "waited %d sec for %d streams"),
- i*wait_secs, (int)h2_ihash_count(m->shold));
+ H2_MPLX_MSG(m, "waited %u sec for %u streams"),
+ i*wait_secs, h2_ihash_count(m->shold));
h2_ihash_iter(m->shold, m_report_stream_iter, m);
}
}
@@ -501,8 +505,8 @@ void h2_mplx_c1_destroy(h2_mplx *m)
ap_assert(m->processing_count == 0);
if (!h2_ihash_empty(m->shold)) {
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, APLOGNO(03516)
- H2_MPLX_MSG(m, "unexpected %d streams in hold"),
- (int)h2_ihash_count(m->shold));
+ H2_MPLX_MSG(m, "unexpected %u streams in hold"),
+ h2_ihash_count(m->shold));
h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
}
@@ -514,14 +518,14 @@ void h2_mplx_c1_destroy(h2_mplx *m)
}
apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, h2_stream *stream,
- int *pstream_count)
+ unsigned int *pstream_count)
{
H2_MPLX_ENTER(m);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
H2_STRM_MSG(stream, "cleanup"));
m_stream_cleanup(m, stream);
- *pstream_count = (int)h2_ihash_count(m->streams);
+ *pstream_count = h2_ihash_count(m->streams);
H2_MPLX_LEAVE(m);
return APR_SUCCESS;
}
@@ -675,7 +679,7 @@ void h2_mplx_c1_process(h2_mplx *m,
h2_stream_get_fn *get_stream,
h2_stream_pri_cmp_fn *stream_pri_cmp,
h2_session *session,
- int *pstream_count)
+ unsigned int *pstream_count)
{
apr_status_t rv;
int sid;
@@ -705,7 +709,7 @@ void h2_mplx_c1_process(h2_mplx *m,
H2_MPLX_MSG(m, "activate at workers"));
}
}
- *pstream_count = (int)h2_ihash_count(m->streams);
+ *pstream_count = h2_ihash_count(m->streams);
#if APR_POOL_DEBUG
do {
@@ -819,7 +823,7 @@ static conn_rec *s_next_c2(h2_mplx *m)
{
h2_stream *stream = NULL;
apr_status_t rv = APR_SUCCESS;
- int sid;
+ apr_uint32_t sid;
conn_rec *c2 = NULL;
h2_c2_transit *transit = NULL;
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
index 2382e46cf4..8b1feb80f7 100644
--- a/modules/http2/h2_mplx.h
+++ b/modules/http2/h2_mplx.h
@@ -75,16 +75,16 @@ struct h2_mplx {
struct h2_iqueue *q; /* all stream ids that need to be started */
apr_size_t stream_max_mem; /* max memory to buffer for a stream */
- int max_streams; /* max # of concurrent streams */
- int max_stream_id_started; /* highest stream id that started processing */
+ apr_uint32_t max_streams; /* max # of concurrent streams */
+ apr_uint32_t max_stream_id_started; /* highest stream id that started processing */
- int processing_count; /* # of c2 working for this mplx */
- int processing_limit; /* current limit on processing c2s, dynamic */
- int processing_max; /* max, hard limit of processing c2s */
+ apr_uint32_t processing_count; /* # of c2 working for this mplx */
+ apr_uint32_t processing_limit; /* current limit on processing c2s, dynamic */
+ apr_uint32_t processing_max; /* max, hard limit of processing c2s */
apr_time_t last_mood_change; /* last time, processing limit changed */
apr_interval_time_t mood_update_interval; /* how frequent we update at most */
- int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
+ apr_uint32_t irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
apr_thread_mutex_t *lock;
struct apr_thread_cond_t *join_wait;
@@ -101,7 +101,7 @@ struct h2_mplx {
request_rec *scratch_r; /* pseudo request_rec for scoreboard reporting */
- apr_size_t max_spare_transits; /* max number of transit pools idling */
+ apr_uint32_t max_spare_transits; /* max number of transit pools idling */
apr_array_header_t *c2_transits; /* base pools for running c2 connections */
};
@@ -139,7 +139,7 @@ int h2_mplx_c1_shutdown(h2_mplx *m);
* @param pstream_count return the number of streams active
*/
apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, struct h2_stream *stream,
- int *pstream_count);
+ unsigned int *pstream_count);
int h2_mplx_c1_stream_is_running(h2_mplx *m, struct h2_stream *stream);
@@ -157,7 +157,7 @@ void h2_mplx_c1_process(h2_mplx *m,
h2_stream_get_fn *get_stream,
h2_stream_pri_cmp_fn *cmp,
struct h2_session *session,
- int *pstream_count);
+ unsigned int *pstream_count);
/**
* Stream priorities have changed, reschedule pending requests.
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
index 36b177b76a..bea353dca1 100644
--- a/modules/http2/h2_proxy_session.c
+++ b/modules/http2/h2_proxy_session.c
@@ -631,7 +631,7 @@ static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id,
}
if (status == APR_SUCCESS) {
- ssize_t readlen = 0;
+ size_t readlen = 0;
while (status == APR_SUCCESS
&& (readlen < length)
&& !APR_BRIGADE_EMPTY(stream->input)) {
@@ -650,7 +650,7 @@ static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id,
status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
if (status == APR_SUCCESS && blen > 0) {
- ssize_t copylen = H2MIN(length - readlen, blen);
+ size_t copylen = H2MIN(length - readlen, blen);
memcpy(buf, bdata, copylen);
buf += copylen;
readlen += copylen;
@@ -964,7 +964,7 @@ static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *
apr_status_t status = APR_SUCCESS;
apr_size_t readlen = 0;
ssize_t n;
-
+
while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
apr_bucket* b = APR_BRIGADE_FIRST(bb);
@@ -987,9 +987,10 @@ static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *
}
}
else {
- readlen += n;
- if (n < blen) {
- apr_bucket_split(b, n);
+ size_t rlen = (size_t)n;
+ readlen += rlen;
+ if (rlen < blen) {
+ apr_bucket_split(b, rlen);
}
}
}
diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c
index c1b78fc096..dc69ec0636 100644
--- a/modules/http2/h2_proxy_util.c
+++ b/modules/http2/h2_proxy_util.c
@@ -496,7 +496,7 @@ static int ignore_header(const literal *lits, size_t llen,
const char *name, size_t nlen)
{
const literal *lit;
- int i;
+ size_t i;
for (i = 0; i < llen; ++i) {
lit = &lits[i];
@@ -951,7 +951,7 @@ static void map_link(link_ctx *ctx)
{
if (ctx->link_start < ctx->link_end) {
char buffer[HUGE_STRING_LEN];
- int need_len, link_len, buffer_len, prepend_p_server;
+ size_t need_len, link_len, buffer_len, prepend_p_server;
const char *mapped;
buffer[0] = '\0';
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
index eaabffd3f1..c87dfa04db 100644
--- a/modules/http2/h2_push.c
+++ b/modules/http2/h2_push.c
@@ -481,8 +481,7 @@ static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push
EVP_MD_CTX *md;
apr_uint64_t val;
unsigned char hash[EVP_MAX_MD_SIZE];
- unsigned len;
- int i;
+ unsigned len, i;
md = EVP_MD_CTX_create();
ap_assert(md != NULL);
@@ -599,7 +598,7 @@ static void move_to_last(h2_push_diary *diary, apr_size_t idx)
{
h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
h2_push_diary_entry e;
- int lastidx;
+ apr_size_t lastidx;
/* Move an existing entry to the last place */
if (diary->entries->nelts <= 0)
@@ -792,11 +791,11 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
int maxP, const char *authority,
const char **pdata, apr_size_t *plen)
{
- int nelts, N, i;
+ int nelts, N;
unsigned char log2n, log2pmax;
gset_encoder encoder;
apr_uint64_t *hashes;
- apr_size_t hash_count;
+ apr_size_t hash_count, i;
nelts = diary->entries->nelts;
N = ceil_power_of_2(nelts);
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index 1056d4b356..92d3f0a7e6 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -530,7 +530,7 @@ static int on_send_data_cb(nghttp2_session *ngh2,
apr_brigade_cleanup(session->bbtmp);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
- else if (len != length) {
+ else if (len != (apr_off_t)length) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1,
H2_STRM_MSG(stream, "send_data_cb, wanted %ld bytes, "
"got %ld from stream"), (long)length, (long)len);
@@ -636,8 +636,8 @@ static ssize_t select_padding_cb(nghttp2_session *ngh2,
size_t max_payloadlen, void *user_data)
{
h2_session *session = user_data;
- ssize_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */
- ssize_t padded_len = frame_len;
+ size_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */
+ size_t padded_len = frame_len;
/* Determine # of padding bytes to append to frame. Unless session->padding_always
* the number may be capped by the ui.write_size that currently applies.
diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
index 2cf2856efb..fbddfdd2a3 100644
--- a/modules/http2/h2_session.h
+++ b/modules/http2/h2_session.h
@@ -73,7 +73,7 @@ typedef struct h2_session {
struct h2_workers *workers; /* for executing streams */
struct h2_c1_io_in_ctx_t *cin; /* connection input filter context */
h2_c1_io io; /* io on httpd conn filters */
- int padding_max; /* max number of padding bytes */
+ unsigned int padding_max; /* max number of padding bytes */
int padding_always; /* padding has precedence over I/O optimizations */
struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
@@ -89,14 +89,14 @@ typedef struct h2_session {
struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
struct h2_stream_monitor *monitor;/* monitor callbacks for streams */
- int open_streams; /* number of streams processing */
-
- int streams_done; /* number of http/2 streams handled */
- int responses_submitted; /* number of http/2 responses submitted */
- int streams_reset; /* number of http/2 streams reset by client */
- int pushes_promised; /* number of http/2 push promises submitted */
- int pushes_submitted; /* number of http/2 pushed responses submitted */
- int pushes_reset; /* number of http/2 pushed reset by client */
+ unsigned int open_streams; /* number of streams processing */
+
+ unsigned int streams_done; /* number of http/2 streams handled */
+ unsigned int responses_submitted; /* number of http/2 responses submitted */
+ unsigned int streams_reset; /* number of http/2 streams reset by client */
+ unsigned int pushes_promised; /* number of http/2 push promises submitted */
+ unsigned int pushes_submitted; /* number of http/2 pushed responses submitted */
+ unsigned int pushes_reset; /* number of http/2 pushed reset by client */
apr_size_t frames_received; /* number of http/2 frames received */
apr_size_t frames_sent; /* number of http/2 frames sent */
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index a884af4a40..2fc9b70b0a 100644
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -147,7 +147,7 @@ static int on_frame(h2_stream_state_t state, int frame_type,
{
ap_assert(frame_type >= 0);
ap_assert(state >= 0);
- if (frame_type >= maxlen) {
+ if (frame_type < 0 || (apr_size_t)frame_type >= maxlen) {
return state; /* NOP, ignore unknown frame types */
}
return on_map(state, frame_map[frame_type]);
@@ -319,7 +319,7 @@ static void on_state_invalid(h2_stream *stream)
static apr_status_t transit(h2_stream *stream, int new_state)
{
- if (new_state == stream->state) {
+ if ((h2_stream_state_t)new_state == stream->state) {
return APR_SUCCESS;
}
else if (new_state < 0) {
@@ -379,7 +379,7 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
AP_DEBUG_ASSERT(new_state > S_XXX);
return;
}
- else if (new_state == stream->state) {
+ else if ((h2_stream_state_t)new_state == stream->state) {
/* nop */
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
H2_STRM_MSG(stream, "non-state event %d"), ev);
@@ -668,7 +668,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
}
if (name[0] == ':') {
- if ((vlen) > session->s->limit_req_line) {
+ if (vlen > APR_INT32_MAX || (int)vlen > session->s->limit_req_line) {
/* pseudo header: approximation of request line size check */
if (!h2_stream_is_ready(stream)) {
ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1,
@@ -912,7 +912,8 @@ static apr_status_t buffer_output_receive(h2_stream *stream)
buf_len = h2_brigade_mem_size(stream->out_buffer);
}
- if (buf_len >= stream->session->max_stream_mem) {
+ if (buf_len > APR_INT32_MAX
+ || (apr_size_t)buf_len >= stream->session->max_stream_mem) {
/* we have buffered enough. No need to read more.
* However, we have now output pending for which we may not
* receive another poll event. We need to make sure that this
@@ -1396,7 +1397,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
* is requested. But we can reduce the size in case the master
* connection operates in smaller chunks. (TLS warmup) */
if (stream->session->io.write_size > 0) {
- apr_off_t chunk_len = stream->session->io.write_size - H2_FRAME_HDR_LEN;
+ apr_size_t chunk_len = stream->session->io.write_size - H2_FRAME_HDR_LEN;
if (length > chunk_len) {
length = chunk_len;
}
@@ -1405,7 +1406,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
/* How much data do we have in our buffers that we can write? */
check_and_receive:
buf_len = output_data_buffered(stream, &eos, &header_blocked);
- while (buf_len < length && !eos && !header_blocked) {
+ while (buf_len < (apr_off_t)length && !eos && !header_blocked) {
/* read more? */
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
H2_SSSN_STRM_MSG(session, stream_id,
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
index 90d4e7e176..1079d0b096 100644
--- a/modules/http2/h2_util.c
+++ b/modules/http2/h2_util.c
@@ -229,7 +229,7 @@ h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int)
return ih;
}
-size_t h2_ihash_count(h2_ihash_t *ih)
+unsigned int h2_ihash_count(h2_ihash_t *ih)
{
return apr_hash_count(ih->hash);
}
@@ -446,7 +446,7 @@ int h2_iq_shift(h2_iqueue *q)
size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max)
{
- int i;
+ size_t i;
for (i = 0; i < max; ++i) {
pint[i] = h2_iq_shift(q);
if (pint[i] == 0) {
@@ -1169,56 +1169,24 @@ apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra)
* h2_util for bucket brigades
******************************************************************************/
-static apr_status_t last_not_included(apr_bucket_brigade *bb,
- apr_off_t maxlen,
- apr_bucket **pend)
+static void fit_bucket_into(apr_bucket *b, apr_off_t *plen)
{
- apr_bucket *b;
- apr_status_t status = APR_SUCCESS;
-
- if (maxlen >= 0) {
- /* Find the bucket, up to which we reach maxlen/mem bytes */
- for (b = APR_BRIGADE_FIRST(bb);
- (b != APR_BRIGADE_SENTINEL(bb));
- b = APR_BUCKET_NEXT(b)) {
-
- if (APR_BUCKET_IS_METADATA(b)) {
- /* included */
- }
- else {
- if (b->length == ((apr_size_t)-1)) {
- const char *ign;
- apr_size_t ilen;
- status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
- if (status != APR_SUCCESS) {
- return status;
- }
- }
-
- if (maxlen == 0 && b->length > 0) {
- *pend = b;
- return status;
- }
-
- if (APR_BUCKET_IS_FILE(b)
-#if APR_HAS_MMAP
- || APR_BUCKET_IS_MMAP(b)
-#endif
- ) {
- /* we like to move it, always */
- }
- else if (maxlen < (apr_off_t)b->length) {
- apr_bucket_split(b, (apr_size_t)maxlen);
- maxlen = 0;
- }
- else {
- maxlen -= b->length;
- }
- }
- }
+ /* signed apr_off_t is at least as large as unsigned apr_size_t.
+ * Problems may arise when they are both the same size. Then
+ * the bucket length *may* be larger than a value we can hold
+ * in apr_off_t. Before casting b->length to apr_off_t we must
+ * check the limitations.
+ * After we have resized the bucket, it is safe to cast and subtract.
+ */
+ if ((sizeof(apr_off_t) == sizeof(apr_int64_t)
+ && b->length > APR_INT64_MAX)
+ || (sizeof(apr_off_t) == sizeof(apr_int32_t)
+ && b->length > APR_INT32_MAX)
+ || *plen < (apr_off_t)b->length) {
+ /* bucket is longer than *plen */
+ apr_bucket_split(b, *plen);
}
- *pend = APR_BRIGADE_SENTINEL(bb);
- return status;
+ *plen -= (apr_off_t)b->length;
}
apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
@@ -1237,29 +1205,20 @@ apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
APR_BRIGADE_INSERT_TAIL(dest, b);
}
else {
- if (remain == b->length) {
- /* fall through */
- }
- else if (remain <= 0) {
+ if (remain <= 0) {
return status;
}
- else {
- if (b->length == ((apr_size_t)-1)) {
- const char *ign;
- apr_size_t ilen;
- status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
- if (status != APR_SUCCESS) {
- return status;
- }
- }
-
- if (remain < b->length) {
- apr_bucket_split(b, remain);
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
}
}
+ fit_bucket_into(b, &remain);
APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(dest, b);
- remain -= b->length;
}
}
return status;
@@ -1282,86 +1241,28 @@ apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
/* fall through */
}
else {
- if (remain == b->length) {
- /* fall through */
- }
- else if (remain <= 0) {
+ if (remain <= 0) {
return status;
}
- else {
- if (b->length == ((apr_size_t)-1)) {
- const char *ign;
- apr_size_t ilen;
- status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
- if (status != APR_SUCCESS) {
- return status;
- }
- }
-
- if (remain < b->length) {
- apr_bucket_split(b, remain);
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
}
}
+ fit_bucket_into(b, &remain);
}
status = apr_bucket_copy(b, &b);
if (status != APR_SUCCESS) {
return status;
}
APR_BRIGADE_INSERT_TAIL(dest, b);
- remain -= b->length;
}
return status;
}
-int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len)
-{
- apr_bucket *b, *end;
-
- apr_status_t status = last_not_included(bb, len, &end);
- if (status != APR_SUCCESS) {
- return status;
- }
-
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb) && b != end;
- b = APR_BUCKET_NEXT(b))
- {
- if (APR_BUCKET_IS_EOS(b)) {
- return 1;
- }
- }
- return 0;
-}
-
-apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
- apr_off_t *plen, int *peos)
-{
- apr_status_t status;
- apr_off_t blen = 0;
-
- /* test read to determine available length */
- status = apr_brigade_length(bb, 1, &blen);
- if (status != APR_SUCCESS) {
- return status;
- }
- else if (blen == 0) {
- /* brigade without data, does it have an EOS bucket somewhere? */
- *plen = 0;
- *peos = h2_util_has_eos(bb, -1);
- }
- else {
- /* data in the brigade, limit the length returned. Check for EOS
- * bucket only if we indicate data. This is required since plen == 0
- * means "the whole brigade" for h2_util_has_eos()
- */
- if (blen < *plen || *plen < 0) {
- *plen = blen;
- }
- *peos = h2_util_has_eos(bb, *plen);
- }
- return APR_SUCCESS;
-}
-
apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
apr_bucket *b, const char *sep)
{
@@ -1422,10 +1323,11 @@ apr_status_t h2_append_brigade(apr_bucket_brigade *to,
h2_bucket_gate *should_append)
{
apr_bucket *e;
- apr_off_t len = 0, remain = *plen;
+ apr_off_t start, remain;
apr_status_t rv;
*peos = 0;
+ start = remain = *plen;
while (!APR_BRIGADE_EMPTY(from)) {
e = APR_BRIGADE_FIRST(from);
@@ -1441,7 +1343,10 @@ apr_status_t h2_append_brigade(apr_bucket_brigade *to,
}
}
else {
- if (remain > 0 && e->length == ((apr_size_t)-1)) {
+ if (remain <= 0) {
+ goto leave;
+ }
+ if (e->length == ((apr_size_t)-1)) {
const char *ign;
apr_size_t ilen;
rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ);
@@ -1449,22 +1354,13 @@ apr_status_t h2_append_brigade(apr_bucket_brigade *to,
return rv;
}
}
-
- if (remain < e->length) {
- if (remain <= 0) {
- goto leave;
- }
- apr_bucket_split(e, (apr_size_t)remain);
- }
+ fit_bucket_into(e, &remain);
}
-
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(to, e);
- len += e->length;
- remain -= e->length;
}
leave:
- *plen = len;
+ *plen = start - remain;
return APR_SUCCESS;
}
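
As an editorial aside on the fit_bucket_into() comment above (not part of the patch): a self-contained sketch of the width check it describes before casting an unsigned apr_size_t bucket length to the signed apr_off_t; the helper name bucket_len_fits_off_t is hypothetical:

#include <apr.h>
#include <apr_buckets.h>

/* Return 1 if b->length can be represented as apr_off_t without overflow.
 * When apr_off_t and apr_size_t have the same width, the unsigned length
 * may exceed the signed maximum, so check the limit before casting. */
static int bucket_len_fits_off_t(const apr_bucket *b)
{
    if (sizeof(apr_off_t) == sizeof(apr_int64_t)) {
        return b->length <= APR_INT64_MAX;
    }
    if (sizeof(apr_off_t) == sizeof(apr_int32_t)) {
        return b->length <= APR_INT32_MAX;
    }
    return 1;
}
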
diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h
index 5a3dbe5142..02e8178ebb 100644
--- a/modules/http2/h2_util.h
+++ b/modules/http2/h2_util.h
@@ -45,7 +45,7 @@ typedef int h2_ihash_iter_t(void *ctx, void *val);
*/
h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
-size_t h2_ihash_count(h2_ihash_t *ih);
+unsigned int h2_ihash_count(h2_ihash_t *ih);
int h2_ihash_empty(h2_ihash_t *ih);
void *h2_ihash_get(h2_ihash_t *ih, int id);
@@ -432,24 +432,7 @@ apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
apr_bucket_brigade *src,
apr_off_t length);
-/**
- * Return != 0 iff there is a FLUSH or EOS bucket in the brigade.
- * @param bb the brigade to check on
- * @return != 0 iff brigade holds FLUSH or EOS bucket (or both)
- */
-int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len);
-
-/**
- * Check how many bytes of the desired amount are available and if the
- * end of stream is reached by that amount.
- * @param bb the brigade to check
- * @param plen the desired length and, on return, the available length
- * @param on return, if eos has been reached
- */
-apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
- apr_off_t *plen, int *peos);
-
-typedef apr_status_t h2_util_pass_cb(void *ctx,
+typedef apr_status_t h2_util_pass_cb(void *ctx,
const char *data, apr_off_t len);
/**
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
index 1732f945a5..b95c0fa84b 100644
--- a/modules/http2/h2_workers.c
+++ b/modules/http2/h2_workers.c
@@ -60,7 +60,7 @@ typedef enum {
typedef struct h2_slot h2_slot;
struct h2_slot {
APR_RING_ENTRY(h2_slot) link;
- int id;
+ apr_uint32_t id;
apr_pool_t *pool;
h2_slot_state_t state;
volatile int should_shutdown;
@@ -422,7 +422,8 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pchild,
h2_workers *workers;
apr_pool_t *pool;
apr_allocator_t *allocator;
- int i, locked = 0;
+ int locked = 0;
+ apr_uint32_t i;
ap_assert(s);
ap_assert(pchild);
@@ -528,7 +529,7 @@ cleanup:
return NULL;
}
-apr_size_t h2_workers_get_max_workers(h2_workers *workers)
+apr_uint32_t h2_workers_get_max_workers(h2_workers *workers)
{
return workers->max_slots;
}
diff --git a/modules/http2/h2_workers.h b/modules/http2/h2_workers.h
index 5cbf16e400..c219304fa8 100644
--- a/modules/http2/h2_workers.h
+++ b/modules/http2/h2_workers.h
@@ -53,7 +53,7 @@ void h2_workers_shutdown(h2_workers *workers, int graceful);
/**
* Get the maximum number of workers.
*/
-apr_size_t h2_workers_get_max_workers(h2_workers *workers);
+apr_uint32_t h2_workers_get_max_workers(h2_workers *workers);
/**
* ap_conn_producer_t is the source of connections (conn_rec*) to run.
diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
index a4800c148b..ddef2bee06 100644
--- a/modules/http2/mod_http2.c
+++ b/modules/http2/mod_http2.c
@@ -317,7 +317,7 @@ static int http2_is_h2(conn_rec *c)
static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
conn_rec *c, request_rec *r, char *name)
{
- int i;
+ unsigned int i;
/* If the # of vars grow, we need to put definitions in a hash */
for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
h2_var_def *vdef = &H2_VARS[i];
@@ -334,7 +334,7 @@ static int h2_h2_fixups(request_rec *r)
{
if (r->connection->master) {
h2_conn_ctx_t *ctx = h2_conn_ctx_get(r->connection);
- int i;
+ unsigned int i;
apr_interval_time_t stream_timeout;
for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {