diff --git a/CVE-2020-11984.patch b/CVE-2020-11984.patch deleted file mode 100644 index 9189c37..0000000 --- a/CVE-2020-11984.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 0c543e3f5b3881d515d6235f152aacaaaf3aba72 Mon Sep 17 00:00:00 2001 -From: Yann Ylavic -Date: Fri, 24 Jul 2020 09:35:25 +0000 -Subject: [PATCH] Merge r1880205, r1880214 from trunk: - -mod_proxy_uwsgi: Error out on HTTP header larger than 16K - -The uwsgi protocol does not let us serialize more than 16K of HTTP header, -so fail early with 500 if it happens. - - -Follow up to r1880205, APLOGNO(). - - -Submitted by: ylavic -Reviewed by: ylavic, covener, icing - - -git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1880251 13f79535-47bb-0310-9956-ffa450edef68 ---- - modules/proxy/mod_proxy_uwsgi.c | 13 ++++++++++--- - 1 files changed, 10 insertions(+), 3 deletions(-) - -diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c -index 2ac2a95d2ef..0209ac4062e 100644 ---- a/modules/proxy/mod_proxy_uwsgi.c -+++ b/modules/proxy/mod_proxy_uwsgi.c -@@ -136,7 +136,7 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn) - int j; - - apr_size_t headerlen = 4; -- apr_uint16_t pktsize, keylen, vallen; -+ apr_size_t pktsize, keylen, vallen; - const char *script_name; - const char *path_info; - const char *auth; -@@ -178,6 +178,15 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn) - headerlen += 2 + strlen(env[j].key) + 2 + strlen(env[j].val); - } - -+ pktsize = headerlen - 4; -+ if (pktsize > APR_UINT16_MAX) { -+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10259) -+ "can't send headers to %s:%u: packet size too " -+ "large (%" APR_SIZE_T_FMT ")", -+ conn->hostname, conn->port, pktsize); -+ return HTTP_INTERNAL_SERVER_ERROR; -+ } -+ - ptr = buf = apr_palloc(r->pool, headerlen); - - ptr += 4; -@@ -196,8 +205,6 @@ static int uwsgi_send_headers(request_rec *r, proxy_conn_rec * conn) - ptr += vallen; - } - -- pktsize = headerlen - 4; -- - buf[0] = 0; - buf[1] = (apr_byte_t) (pktsize & 0xff); - buf[2] = (apr_byte_t) ((pktsize >> 8) & 0xff); diff --git a/CVE-2020-11993.patch b/CVE-2020-11993.patch deleted file mode 100644 index b1e3ee9..0000000 --- a/CVE-2020-11993.patch +++ /dev/null @@ -1,1902 +0,0 @@ -From 63a0a87efa0925514d15c211b508f6594669888c Mon Sep 17 00:00:00 2001 -From: Graham Leggett -Date: Wed, 8 Jul 2020 11:53:48 +0000 -Subject: [PATCH] *) mod_http2: connection terminology renamed to - master/secondary. trunk patch: http://svn.apache.org/r1878926 - http://svn.apache.org/r1879156 2.4.x patch: - https://svn.apache.org/repos/asf/httpd/httpd/patches/2.4.x/h2-master-secondary.patch - +1: icing, ylavic, minfrin ylavic: nitpicking, mixed - "H2_secondary_IN" and "H2_secondary_OUT" case to register the - filters, but not for adding them. IIRC filters names are case - insentive so shouldn't matter, just popped at my eyes.. icing: updated - patch and added r1879156 to fix the eye bleed. 
jailletc36: CHANGES could - also be looked at if it makes sense to update the terminology - also here - -git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1879642 13f79535-47bb-0310-9956-ffa450edef68 ---- - modules/http2/h2_conn.c | 52 +++---- - modules/http2/h2_conn.h | 8 +- - modules/http2/h2_filter.c | 4 +- - modules/http2/h2_h2.c | 10 +- - modules/http2/h2_mplx.c | 283 +++++++++++++++++++------------------ - modules/http2/h2_mplx.h | 160 ++++++--------------- - modules/http2/h2_request.c | 7 +- - modules/http2/h2_session.c | 30 ++-- - modules/http2/h2_session.h | 2 +- - modules/http2/h2_stream.c | 2 +- - modules/http2/h2_task.c | 68 ++++----- - modules/http2/h2_task.h | 2 +- - modules/http2/h2_workers.c | 6 +- - modules/http2/mod_http2.c | 4 +- - 14 files changed, 285 insertions(+), 353 deletions(-) - -diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c -index a330cc8..17a2d48 100644 ---- a/modules/http2/h2_conn.c -+++ b/modules/http2/h2_conn.c -@@ -138,7 +138,7 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) - ap_register_input_filter("H2_IN", h2_filter_core_input, - NULL, AP_FTYPE_CONNECTION); - -- status = h2_mplx_child_init(pool, s); -+ status = h2_mplx_m_child_init(pool, s); - - if (status == APR_SUCCESS) { - status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM, -@@ -260,7 +260,7 @@ apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c) - return DONE; - } - --conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent) -+conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent) - { - apr_allocator_t *allocator; - apr_status_t status; -@@ -271,7 +271,7 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent) - - ap_assert(master); - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master, -- "h2_stream(%ld-%d): create slave", master->id, slave_id); -+ "h2_stream(%ld-%d): create secondary", master->id, sec_id); - - /* We create a pool with its own allocator to be used for - * processing a request. This is the only way to have the processing -@@ -284,18 +284,18 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent) - status = apr_pool_create_ex(&pool, parent, NULL, allocator); - if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master, -- APLOGNO(10004) "h2_session(%ld-%d): create slave pool", -- master->id, slave_id); -+ APLOGNO(10004) "h2_session(%ld-%d): create secondary pool", -+ master->id, sec_id); - return NULL; - } - apr_allocator_owner_set(allocator, pool); -- apr_pool_tag(pool, "h2_slave_conn"); -+ apr_pool_tag(pool, "h2_secondary_conn"); - - c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec)); - if (c == NULL) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, -- APLOGNO(02913) "h2_session(%ld-%d): create slave", -- master->id, slave_id); -+ APLOGNO(02913) "h2_session(%ld-%d): create secondary", -+ master->id, sec_id); - apr_pool_destroy(pool); - return NULL; - } -@@ -322,19 +322,19 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent) - c->clogging_input_filters = 1; - c->log = NULL; - c->log_id = apr_psprintf(pool, "%ld-%d", -- master->id, slave_id); -+ master->id, sec_id); - c->aborted = 0; -- /* We cannot install the master connection socket on the slaves, as -+ /* We cannot install the master connection socket on the secondary, as - * modules mess with timeouts/blocking of the socket, with - * unwanted side effects to the master connection processing. 
-- * Fortunately, since we never use the slave socket, we can just install -+ * Fortunately, since we never use the secondary socket, we can just install - * a single, process-wide dummy and everyone is happy. - */ - ap_set_module_config(c->conn_config, &core_module, dummy_socket); - /* TODO: these should be unique to this thread */ - c->sbh = master->sbh; -- /* TODO: not all mpm modules have learned about slave connections yet. -- * copy their config from master to slave. -+ /* TODO: not all mpm modules have learned about secondary connections yet. -+ * copy their config from master to secondary. - */ - if ((mpm = h2_conn_mpm_module()) != NULL) { - cfg = ap_get_module_config(master->conn_config, mpm); -@@ -342,38 +342,38 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent) - } - - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, -- "h2_slave(%s): created", c->log_id); -+ "h2_secondary(%s): created", c->log_id); - return c; - } - --void h2_slave_destroy(conn_rec *slave) -+void h2_secondary_destroy(conn_rec *secondary) - { -- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave, -- "h2_slave(%s): destroy", slave->log_id); -- slave->sbh = NULL; -- apr_pool_destroy(slave->pool); -+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, secondary, -+ "h2_secondary(%s): destroy", secondary->log_id); -+ secondary->sbh = NULL; -+ apr_pool_destroy(secondary->pool); - } - --apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd) -+apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd) - { -- if (slave->keepalives == 0) { -+ if (secondary->keepalives == 0) { - /* Simulate that we had already a request on this connection. Some - * hooks trigger special behaviour when keepalives is 0. - * (Not necessarily in pre_connection, but later. Set it here, so it - * is in place.) */ -- slave->keepalives = 1; -+ secondary->keepalives = 1; - /* We signal that this connection will be closed after the request. - * Which is true in that sense that we throw away all traffic data -- * on this slave connection after each requests. Although we might -+ * on this secondary connection after each requests. Although we might - * reuse internal structures like memory pools. - * The wanted effect of this is that httpd does not try to clean up - * any dangling data on this connection when a request is done. Which - * is unnecessary on a h2 stream. 
- */ -- slave->keepalive = AP_CONN_CLOSE; -- return ap_run_pre_connection(slave, csd); -+ secondary->keepalive = AP_CONN_CLOSE; -+ return ap_run_pre_connection(secondary, csd); - } -- ap_assert(slave->output_filters); -+ ap_assert(secondary->output_filters); - return APR_SUCCESS; - } - -diff --git a/modules/http2/h2_conn.h b/modules/http2/h2_conn.h -index c560405..3b8b33e 100644 ---- a/modules/http2/h2_conn.h -+++ b/modules/http2/h2_conn.h -@@ -68,10 +68,10 @@ h2_mpm_type_t h2_conn_mpm_type(void); - const char *h2_conn_mpm_name(void); - int h2_mpm_supported(void); - --conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent); --void h2_slave_destroy(conn_rec *slave); -+conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent); -+void h2_secondary_destroy(conn_rec *secondary); - --apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd); --void h2_slave_run_connection(conn_rec *slave); -+apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd); -+void h2_secondary_run_connection(conn_rec *secondary); - - #endif /* defined(__mod_h2__h2_conn__) */ -diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c -index 2fc5e12..d9257fa 100644 ---- a/modules/http2/h2_filter.c -+++ b/modules/http2/h2_filter.c -@@ -370,7 +370,7 @@ static void add_streams(apr_bucket_brigade *bb, h2_session *s, int last) - x.s = s; - x.idx = 0; - bbout(bb, " \"streams\": {"); -- h2_mplx_stream_do(s->mplx, add_stream, &x); -+ h2_mplx_m_stream_do(s->mplx, add_stream, &x); - bbout(bb, "\n }%s\n", last? "" : ","); - } - -@@ -433,7 +433,7 @@ static void add_stats(apr_bucket_brigade *bb, h2_session *s, - static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b) - { - h2_mplx *m = task->mplx; -- h2_stream *stream = h2_mplx_stream_get(m, task->stream_id); -+ h2_stream *stream = h2_mplx_t_stream_get(m, task); - h2_session *s; - conn_rec *c; - -diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c -index 1b69fe3..96704b4 100644 ---- a/modules/http2/h2_h2.c -+++ b/modules/http2/h2_h2.c -@@ -666,7 +666,7 @@ static int h2_h2_pre_close_conn(conn_rec *c) - { - h2_ctx *ctx; - -- /* slave connection? */ -+ /* secondary connection? */ - if (c->master) { - return DECLINED; - } -@@ -710,7 +710,7 @@ static void check_push(request_rec *r, const char *tag) - - static int h2_h2_post_read_req(request_rec *r) - { -- /* slave connection? */ -+ /* secondary connection? */ - if (r->connection->master) { - struct h2_task *task = h2_ctx_get_task(r->connection); - /* This hook will get called twice on internal redirects. Take care -@@ -729,7 +729,7 @@ static int h2_h2_post_read_req(request_rec *r) - ap_add_output_filter("H2_RESPONSE", task, r, r->connection); - - for (f = r->input_filters; f; f = f->next) { -- if (!strcmp("H2_SLAVE_IN", f->frec->name)) { -+ if (!strcmp("H2_SECONDARY_IN", f->frec->name)) { - f->r = r; - break; - } -@@ -743,7 +743,7 @@ static int h2_h2_post_read_req(request_rec *r) - - static int h2_h2_late_fixups(request_rec *r) - { -- /* slave connection? */ -+ /* secondary connection? 
*/ - if (r->connection->master) { - struct h2_task *task = h2_ctx_get_task(r->connection); - if (task) { -@@ -751,7 +751,7 @@ static int h2_h2_late_fixups(request_rec *r) - task->output.copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES); - if (task->output.copy_files) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, -- "h2_slave_out(%s): copy_files on", task->id); -+ "h2_secondary_out(%s): copy_files on", task->id); - h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL); - } - check_push(r, "late_fixup"); -diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c -index 9a5f355..62c381d 100644 ---- a/modules/http2/h2_mplx.c -+++ b/modules/http2/h2_mplx.c -@@ -56,10 +56,18 @@ typedef struct { - apr_size_t count; - } stream_iter_ctx; - --static apr_status_t mplx_be_happy(h2_mplx *m); --static apr_status_t mplx_be_annoyed(h2_mplx *m); -+/** -+ * Naming convention for static functions: -+ * - m_*: function only called from the master connection -+ * - s_*: function only called from a secondary connection -+ * - t_*: function only called from a h2_task holder -+ * - mst_*: function called from everyone -+ */ - --apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s) -+static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task); -+static apr_status_t m_be_annoyed(h2_mplx *m); -+ -+apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s) - { - return APR_SUCCESS; - } -@@ -81,26 +89,25 @@ apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s) - #define H2_MPLX_LEAVE_MAYBE(m, dolock) \ - if (dolock) apr_thread_mutex_unlock(m->lock) - --static void check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked); -+static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked); - --static void stream_output_consumed(void *ctx, -- h2_bucket_beam *beam, apr_off_t length) -+static void mst_stream_output_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length) - { - } - --static void stream_input_ev(void *ctx, h2_bucket_beam *beam) -+static void mst_stream_input_ev(void *ctx, h2_bucket_beam *beam) - { - h2_stream *stream = ctx; - h2_mplx *m = stream->session->mplx; - apr_atomic_set32(&m->event_pending, 1); - } - --static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length) -+static void m_stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length) - { - h2_stream_in_consumed(ctx, length); - } - --static void stream_joined(h2_mplx *m, h2_stream *stream) -+static void ms_stream_joined(h2_mplx *m, h2_stream *stream) - { - ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done); - -@@ -109,7 +116,7 @@ static void stream_joined(h2_mplx *m, h2_stream *stream) - h2_ihash_add(m->spurge, stream); - } - --static void stream_cleanup(h2_mplx *m, h2_stream *stream) -+static void m_stream_cleanup(h2_mplx *m, h2_stream *stream) - { - ap_assert(stream->state == H2_SS_CLEANUP); - -@@ -128,7 +135,7 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream) - h2_iq_remove(m->q, stream->id); - - if (!h2_task_has_started(stream->task) || stream->task->done_done) { -- stream_joined(m, stream); -+ ms_stream_joined(m, stream); - } - else { - h2_ififo_remove(m->readyq, stream->id); -@@ -150,8 +157,8 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream) - * their HTTP/1 cousins, the separate allocator seems to work better - * than protecting a shared h2_session one with an own lock. 
- */ --h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent, -- h2_workers *workers) -+h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *parent, -+ h2_workers *workers) - { - apr_status_t status = APR_SUCCESS; - apr_allocator_t *allocator; -@@ -165,7 +172,7 @@ h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent, - m->s = s; - - /* We create a pool with its own allocator to be used for -- * processing slave connections. This is the only way to have the -+ * processing secondary connections. This is the only way to have the - * processing independent of its parent pool in the sense that it - * can work in another thread. Also, the new allocator needs its own - * mutex to synchronize sub-pools. -@@ -217,12 +224,12 @@ h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent, - m->last_mood_change = apr_time_now(); - m->mood_update_interval = apr_time_from_msec(100); - -- m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*)); -+ m->spare_secondary = apr_array_make(m->pool, 10, sizeof(conn_rec*)); - } - return m; - } - --int h2_mplx_shutdown(h2_mplx *m) -+int h2_mplx_m_shutdown(h2_mplx *m) - { - int max_stream_started = 0; - -@@ -236,7 +243,7 @@ int h2_mplx_shutdown(h2_mplx *m) - return max_stream_started; - } - --static int input_consumed_signal(h2_mplx *m, h2_stream *stream) -+static int m_input_consumed_signal(h2_mplx *m, h2_stream *stream) - { - if (stream->input) { - return h2_beam_report_consumption(stream->input); -@@ -244,12 +251,12 @@ static int input_consumed_signal(h2_mplx *m, h2_stream *stream) - return 0; - } - --static int report_consumption_iter(void *ctx, void *val) -+static int m_report_consumption_iter(void *ctx, void *val) - { - h2_stream *stream = val; - h2_mplx *m = ctx; - -- input_consumed_signal(m, stream); -+ m_input_consumed_signal(m, stream); - if (stream->state == H2_SS_CLOSED_L - && (!stream->task || stream->task->worker_done)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, -@@ -260,7 +267,7 @@ static int report_consumption_iter(void *ctx, void *val) - return 1; - } - --static int output_consumed_signal(h2_mplx *m, h2_task *task) -+static int s_output_consumed_signal(h2_mplx *m, h2_task *task) - { - if (task->output.beam) { - return h2_beam_report_consumption(task->output.beam); -@@ -268,7 +275,7 @@ static int output_consumed_signal(h2_mplx *m, h2_task *task) - return 0; - } - --static int stream_destroy_iter(void *ctx, void *val) -+static int m_stream_destroy_iter(void *ctx, void *val) - { - h2_mplx *m = ctx; - h2_stream *stream = val; -@@ -278,7 +285,7 @@ static int stream_destroy_iter(void *ctx, void *val) - - if (stream->input) { - /* Process outstanding events before destruction */ -- input_consumed_signal(m, stream); -+ m_input_consumed_signal(m, stream); - h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy"); - h2_beam_destroy(stream->input); - stream->input = NULL; -@@ -286,12 +293,12 @@ static int stream_destroy_iter(void *ctx, void *val) - - if (stream->task) { - h2_task *task = stream->task; -- conn_rec *slave; -- int reuse_slave = 0; -+ conn_rec *secondary; -+ int reuse_secondary = 0; - - stream->task = NULL; -- slave = task->c; -- if (slave) { -+ secondary = task->c; -+ if (secondary) { - /* On non-serialized requests, the IO logging has not accounted for any - * meta data send over the network: response headers and h2 frame headers. we - * counted this on the stream and need to add this now. 
-@@ -300,26 +307,25 @@ static int stream_destroy_iter(void *ctx, void *val) - if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) { - apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets; - if (unaccounted > 0) { -- h2_task_logio_add_bytes_out(slave, unaccounted); -+ h2_task_logio_add_bytes_out(secondary, unaccounted); - } - } - -- if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) { -- reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2)) -- && !task->rst_error); -+ if (m->s->keep_alive_max == 0 || secondary->keepalives < m->s->keep_alive_max) { -+ reuse_secondary = ((m->spare_secondary->nelts < (m->limit_active * 3 / 2)) -+ && !task->rst_error); - } - -- task->c = NULL; -- if (reuse_slave) { -+ if (reuse_secondary) { - h2_beam_log(task->output.beam, m->c, APLOG_DEBUG, -- APLOGNO(03385) "h2_task_destroy, reuse slave"); -+ APLOGNO(03385) "h2_task_destroy, reuse secondary"); - h2_task_destroy(task); -- APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave; -+ APR_ARRAY_PUSH(m->spare_secondary, conn_rec*) = secondary; - } - else { - h2_beam_log(task->output.beam, m->c, APLOG_TRACE1, -- "h2_task_destroy, destroy slave"); -- h2_slave_destroy(slave); -+ "h2_task_destroy, destroy secondary"); -+ h2_secondary_destroy(secondary); - } - } - } -@@ -327,11 +333,11 @@ static int stream_destroy_iter(void *ctx, void *val) - return 0; - } - --static void purge_streams(h2_mplx *m, int lock) -+static void m_purge_streams(h2_mplx *m, int lock) - { - if (!h2_ihash_empty(m->spurge)) { - H2_MPLX_ENTER_MAYBE(m, lock); -- while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) { -+ while (!h2_ihash_iter(m->spurge, m_stream_destroy_iter, m)) { - /* repeat until empty */ - } - H2_MPLX_LEAVE_MAYBE(m, lock); -@@ -343,13 +349,13 @@ typedef struct { - void *ctx; - } stream_iter_ctx_t; - --static int stream_iter_wrap(void *ctx, void *stream) -+static int m_stream_iter_wrap(void *ctx, void *stream) - { - stream_iter_ctx_t *x = ctx; - return x->cb(stream, x->ctx); - } - --apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx) -+apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx) - { - stream_iter_ctx_t x; - -@@ -357,13 +363,13 @@ apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx) - - x.cb = cb; - x.ctx = ctx; -- h2_ihash_iter(m->streams, stream_iter_wrap, &x); -+ h2_ihash_iter(m->streams, m_stream_iter_wrap, &x); - - H2_MPLX_LEAVE(m); - return APR_SUCCESS; - } - --static int report_stream_iter(void *ctx, void *val) { -+static int m_report_stream_iter(void *ctx, void *val) { - h2_mplx *m = ctx; - h2_stream *stream = val; - h2_task *task = stream->task; -@@ -388,7 +394,7 @@ static int report_stream_iter(void *ctx, void *val) { - return 1; - } - --static int unexpected_stream_iter(void *ctx, void *val) { -+static int m_unexpected_stream_iter(void *ctx, void *val) { - h2_mplx *m = ctx; - h2_stream *stream = val; - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */ -@@ -397,7 +403,7 @@ static int unexpected_stream_iter(void *ctx, void *val) { - return 1; - } - --static int stream_cancel_iter(void *ctx, void *val) { -+static int m_stream_cancel_iter(void *ctx, void *val) { - h2_mplx *m = ctx; - h2_stream *stream = val; - -@@ -411,11 +417,11 @@ static int stream_cancel_iter(void *ctx, void *val) { - h2_stream_rst(stream, H2_ERR_NO_ERROR); - /* All connection data has been sent, simulate cleanup */ - h2_stream_dispatch(stream, H2_SEV_EOS_SENT); -- 
stream_cleanup(m, stream); -+ m_stream_cleanup(m, stream); - return 0; - } - --void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) -+void h2_mplx_m_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) - { - apr_status_t status; - int i, wait_secs = 60, old_aborted; -@@ -429,7 +435,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) - - H2_MPLX_ENTER_ALWAYS(m); - -- /* While really terminating any slave connections, treat the master -+ /* While really terminating any secondary connections, treat the master - * connection as aborted. It's not as if we could send any more data - * at this point. */ - old_aborted = m->c->aborted; -@@ -441,7 +447,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) - "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks", - m->id, (int)h2_ihash_count(m->streams), - (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active); -- while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) { -+ while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) { - /* until empty */ - } - -@@ -463,7 +469,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198) - "h2_mplx(%ld): waited %d sec for %d tasks", - m->id, i*wait_secs, (int)h2_ihash_count(m->shold)); -- h2_ihash_iter(m->shold, report_stream_iter, m); -+ h2_ihash_iter(m->shold, m_report_stream_iter, m); - } - } - m->join_wait = NULL; -@@ -474,7 +480,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516) - "h2_mplx(%ld): unexpected %d streams in hold", - m->id, (int)h2_ihash_count(m->shold)); -- h2_ihash_iter(m->shold, unexpected_stream_iter, m); -+ h2_ihash_iter(m->shold, m_unexpected_stream_iter, m); - } - - m->c->aborted = old_aborted; -@@ -483,39 +489,39 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id); - } - --apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream) -+apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, h2_stream *stream) - { - H2_MPLX_ENTER(m); - - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - H2_STRM_MSG(stream, "cleanup")); -- stream_cleanup(m, stream); -+ m_stream_cleanup(m, stream); - - H2_MPLX_LEAVE(m); - return APR_SUCCESS; - } - --h2_stream *h2_mplx_stream_get(h2_mplx *m, int id) -+h2_stream *h2_mplx_t_stream_get(h2_mplx *m, h2_task *task) - { - h2_stream *s = NULL; - - H2_MPLX_ENTER_ALWAYS(m); - -- s = h2_ihash_get(m->streams, id); -+ s = h2_ihash_get(m->streams, task->stream_id); - - H2_MPLX_LEAVE(m); - return s; - } - --static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes) -+static void mst_output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes) - { - h2_stream *stream = ctx; - h2_mplx *m = stream->session->mplx; - -- check_data_for(m, stream, 0); -+ mst_check_data_for(m, stream, 0); - } - --static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) -+static apr_status_t t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) - { - h2_stream *stream = h2_ihash_get(m->streams, stream_id); - -@@ -527,26 +533,26 @@ static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) - stream->output = beam; - - if (APLOGctrace2(m->c)) { -- h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open"); -+ h2_beam_log(beam, stream->task->c, APLOG_TRACE2, 
"out_open"); - } - else { -- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->task->c, - "h2_mplx(%s): out open", stream->task->id); - } - -- h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream); -- h2_beam_on_produced(stream->output, output_produced, stream); -+ h2_beam_on_consumed(stream->output, NULL, mst_stream_output_consumed, stream); -+ h2_beam_on_produced(stream->output, mst_output_produced, stream); - if (stream->task->output.copy_files) { - h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL); - } - - /* we might see some file buckets in the output, see - * if we have enough handles reserved. */ -- check_data_for(m, stream, 1); -+ mst_check_data_for(m, stream, 1); - return APR_SUCCESS; - } - --apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) -+apr_status_t h2_mplx_t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) - { - apr_status_t status; - -@@ -556,14 +562,14 @@ apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam) - status = APR_ECONNABORTED; - } - else { -- status = out_open(m, stream_id, beam); -+ status = t_out_open(m, stream_id, beam); - } - - H2_MPLX_LEAVE(m); - return status; - } - --static apr_status_t out_close(h2_mplx *m, h2_task *task) -+static apr_status_t s_out_close(h2_mplx *m, h2_task *task) - { - apr_status_t status = APR_SUCCESS; - h2_stream *stream; -@@ -580,17 +586,17 @@ static apr_status_t out_close(h2_mplx *m, h2_task *task) - return APR_ECONNABORTED; - } - -- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c, - "h2_mplx(%s): close", task->id); - status = h2_beam_close(task->output.beam); -- h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close"); -- output_consumed_signal(m, task); -- check_data_for(m, stream, 1); -+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "out_close"); -+ s_output_consumed_signal(m, task); -+ mst_check_data_for(m, stream, 1); - return status; - } - --apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, -- apr_thread_cond_t *iowait) -+apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout, -+ apr_thread_cond_t *iowait) - { - apr_status_t status; - -@@ -599,12 +605,12 @@ apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, - if (m->aborted) { - status = APR_ECONNABORTED; - } -- else if (h2_mplx_has_master_events(m)) { -+ else if (h2_mplx_m_has_master_events(m)) { - status = APR_SUCCESS; - } - else { -- purge_streams(m, 0); -- h2_ihash_iter(m->streams, report_consumption_iter, m); -+ m_purge_streams(m, 0); -+ h2_ihash_iter(m->streams, m_report_consumption_iter, m); - m->added_output = iowait; - status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout); - if (APLOGctrace2(m->c)) { -@@ -619,7 +625,7 @@ apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, - return status; - } - --static void check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked) -+static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked) - { - /* If m->lock is already held, we must release during h2_ififo_push() - * which can wait on its not_full condition, causing a deadlock because -@@ -639,7 +645,7 @@ static void check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked) - } - } - --apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) -+apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, 
h2_stream_pri_cmp *cmp, void *ctx) - { - apr_status_t status; - -@@ -659,22 +665,22 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) - return status; - } - --static void register_if_needed(h2_mplx *m) -+static void ms_register_if_needed(h2_mplx *m, int from_master) - { - if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) { - apr_status_t status = h2_workers_register(m->workers, m); - if (status == APR_SUCCESS) { - m->is_registered = 1; - } -- else { -+ else if (from_master) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021) - "h2_mplx(%ld): register at workers", m->id); - } - } - } - --apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, -- h2_stream_pri_cmp *cmp, void *ctx) -+apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream, -+ h2_stream_pri_cmp *cmp, void *ctx) - { - apr_status_t status; - -@@ -688,13 +694,13 @@ apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, - h2_ihash_add(m->streams, stream); - if (h2_stream_is_ready(stream)) { - /* already have a response */ -- check_data_for(m, stream, 1); -+ mst_check_data_for(m, stream, 1); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - H2_STRM_MSG(stream, "process, add to readyq")); - } - else { - h2_iq_add(m->q, stream->id, cmp, ctx); -- register_if_needed(m); -+ ms_register_if_needed(m, 1); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, - H2_STRM_MSG(stream, "process, added to q")); - } -@@ -704,7 +710,7 @@ apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, - return status; - } - --static h2_task *next_stream_task(h2_mplx *m) -+static h2_task *s_next_stream_task(h2_mplx *m) - { - h2_stream *stream; - int sid; -@@ -713,15 +719,15 @@ static h2_task *next_stream_task(h2_mplx *m) - - stream = h2_ihash_get(m->streams, sid); - if (stream) { -- conn_rec *slave, **pslave; -+ conn_rec *secondary, **psecondary; - -- pslave = (conn_rec **)apr_array_pop(m->spare_slaves); -- if (pslave) { -- slave = *pslave; -- slave->aborted = 0; -+ psecondary = (conn_rec **)apr_array_pop(m->spare_secondary); -+ if (psecondary) { -+ secondary = *psecondary; -+ secondary->aborted = 0; - } - else { -- slave = h2_slave_create(m->c, stream->id, m->pool); -+ secondary = h2_secondary_create(m->c, stream->id, m->pool); - } - - if (!stream->task) { -@@ -729,16 +735,16 @@ static h2_task *next_stream_task(h2_mplx *m) - m->max_stream_started = sid; - } - if (stream->input) { -- h2_beam_on_consumed(stream->input, stream_input_ev, -- stream_input_consumed, stream); -+ h2_beam_on_consumed(stream->input, mst_stream_input_ev, -+ m_stream_input_consumed, stream); - } - -- stream->task = h2_task_create(slave, stream->id, -+ stream->task = h2_task_create(secondary, stream->id, - stream->request, m, stream->input, - stream->session->s->timeout, - m->stream_max_mem); - if (!stream->task) { -- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave, -+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, secondary, - H2_STRM_LOG(APLOGNO(02941), stream, - "create task")); - return NULL; -@@ -753,7 +759,7 @@ static h2_task *next_stream_task(h2_mplx *m) - return NULL; - } - --apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask) -+apr_status_t h2_mplx_s_pop_task(h2_mplx *m, h2_task **ptask) - { - apr_status_t rv = APR_EOF; - -@@ -769,7 +775,7 @@ apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask) - rv = APR_EOF; - } - else { -- *ptask = next_stream_task(m); -+ *ptask = s_next_stream_task(m); - rv = (*ptask != NULL && !h2_iq_empty(m->q))? 
APR_EAGAIN : APR_SUCCESS; - } - if (APR_EAGAIN != rv) { -@@ -779,22 +785,22 @@ apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask) - return rv; - } - --static void task_done(h2_mplx *m, h2_task *task) -+static void s_task_done(h2_mplx *m, h2_task *task) - { - h2_stream *stream; - -- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, - "h2_mplx(%ld): task(%s) done", m->id, task->id); -- out_close(m, task); -+ s_out_close(m, task); - - task->worker_done = 1; - task->done_at = apr_time_now(); -- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c, - "h2_mplx(%s): request done, %f ms elapsed", task->id, - (task->done_at - task->started_at) / 1000.0); - - if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) { -- mplx_be_happy(m); -+ s_mplx_be_happy(m, task); - } - - ap_assert(task->done_done == 0); -@@ -806,60 +812,60 @@ static void task_done(h2_mplx *m, h2_task *task) - /* reset and schedule again */ - h2_task_redo(task); - h2_iq_add(m->q, stream->id, NULL, NULL); -- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, - H2_STRM_MSG(stream, "redo, added to q")); - } - else { - /* stream not cleaned up, stay around */ - task->done_done = 1; -- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c, - H2_STRM_MSG(stream, "task_done, stream open")); - if (stream->input) { - h2_beam_leave(stream->input); - } - - /* more data will not arrive, resume the stream */ -- check_data_for(m, stream, 1); -+ mst_check_data_for(m, stream, 1); - } - } - else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) { - /* stream is done, was just waiting for this. 
*/ - task->done_done = 1; -- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c, - H2_STRM_MSG(stream, "task_done, in hold")); - if (stream->input) { - h2_beam_leave(stream->input); - } -- stream_joined(m, stream); -+ ms_stream_joined(m, stream); - } - else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) { -- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, - H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge")); - ap_assert("stream should not be in spurge" == NULL); - } - else { -- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518) -+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, APLOGNO(03518) - "h2_mplx(%s): task_done, stream not found", - task->id); - ap_assert("stream should still be available" == NULL); - } - } - --void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) -+void h2_mplx_s_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) - { - H2_MPLX_ENTER_ALWAYS(m); - - --m->tasks_active; -- task_done(m, task); -+ s_task_done(m, task); - - if (m->join_wait) { - apr_thread_cond_signal(m->join_wait); - } - if (ptask) { - /* caller wants another task */ -- *ptask = next_stream_task(m); -+ *ptask = s_next_stream_task(m); - } -- register_if_needed(m); -+ ms_register_if_needed(m, 0); - - H2_MPLX_LEAVE(m); - } -@@ -868,7 +874,7 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) - * h2_mplx DoS protection - ******************************************************************************/ - --static int timed_out_busy_iter(void *data, void *val) -+static int m_timed_out_busy_iter(void *data, void *val) - { - stream_iter_ctx *ctx = data; - h2_stream *stream = val; -@@ -881,17 +887,17 @@ static int timed_out_busy_iter(void *data, void *val) - return 1; - } - --static h2_stream *get_timed_out_busy_stream(h2_mplx *m) -+static h2_stream *m_get_timed_out_busy_stream(h2_mplx *m) - { - stream_iter_ctx ctx; - ctx.m = m; - ctx.stream = NULL; - ctx.now = apr_time_now(); -- h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx); -+ h2_ihash_iter(m->streams, m_timed_out_busy_iter, &ctx); - return ctx.stream; - } - --static int latest_repeatable_unsubmitted_iter(void *data, void *val) -+static int m_latest_repeatable_unsubmitted_iter(void *data, void *val) - { - stream_iter_ctx *ctx = data; - h2_stream *stream = val; -@@ -917,7 +923,7 @@ leave: - return 1; - } - --static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m) -+static apr_status_t m_assess_task_to_throttle(h2_task **ptask, h2_mplx *m) - { - stream_iter_ctx ctx; - -@@ -927,7 +933,7 @@ static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m) - ctx.m = m; - ctx.stream = NULL; - ctx.count = 0; -- h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx); -+ h2_ihash_iter(m->streams, m_latest_repeatable_unsubmitted_iter, &ctx); - if (m->tasks_active - ctx.count > m->limit_active) { - /* we are above the limit of running tasks, accounting for the ones - * already throttled. */ -@@ -936,7 +942,7 @@ static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m) - return APR_EAGAIN; - } - /* above limit, be seeing no candidate for easy throttling */ -- if (get_timed_out_busy_stream(m)) { -+ if (m_get_timed_out_busy_stream(m)) { - /* Too many busy workers, unable to cancel enough streams - * and with a busy, timed out stream, we tell the client - * to go away... 
*/ -@@ -946,7 +952,7 @@ static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m) - return APR_SUCCESS; - } - --static apr_status_t unschedule_slow_tasks(h2_mplx *m) -+static apr_status_t m_unschedule_slow_tasks(h2_mplx *m) - { - h2_task *task; - apr_status_t rv; -@@ -954,7 +960,7 @@ static apr_status_t unschedule_slow_tasks(h2_mplx *m) - /* Try to get rid of streams that occupy workers. Look for safe requests - * that are repeatable. If none found, fail the connection. - */ -- while (APR_EAGAIN == (rv = assess_task_to_throttle(&task, m))) { -+ while (APR_EAGAIN == (rv = m_assess_task_to_throttle(&task, m))) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - "h2_mplx(%s): unschedule, resetting task for redo later", - task->id); -@@ -965,7 +971,7 @@ static apr_status_t unschedule_slow_tasks(h2_mplx *m) - return rv; - } - --static apr_status_t mplx_be_happy(h2_mplx *m) -+static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task) - { - apr_time_t now; - -@@ -977,14 +983,14 @@ static apr_status_t mplx_be_happy(h2_mplx *m) - m->limit_active = H2MIN(m->limit_active * 2, m->max_active); - m->last_mood_change = now; - m->irritations_since = 0; -- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, -+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, - "h2_mplx(%ld): mood update, increasing worker limit to %d", - m->id, m->limit_active); - } - return APR_SUCCESS; - } - --static apr_status_t mplx_be_annoyed(h2_mplx *m) -+static apr_status_t m_be_annoyed(h2_mplx *m) - { - apr_status_t status = APR_SUCCESS; - apr_time_t now; -@@ -1015,12 +1021,12 @@ static apr_status_t mplx_be_annoyed(h2_mplx *m) - } - - if (m->tasks_active > m->limit_active) { -- status = unschedule_slow_tasks(m); -+ status = m_unschedule_slow_tasks(m); - } - return status; - } - --apr_status_t h2_mplx_idle(h2_mplx *m) -+apr_status_t h2_mplx_m_idle(h2_mplx *m) - { - apr_status_t status = APR_SUCCESS; - apr_size_t scount; -@@ -1042,7 +1048,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m) - * of busy workers we allow for this connection until it - * well behaves. 
- */ -- status = mplx_be_annoyed(m); -+ status = m_be_annoyed(m); - } - else if (!h2_iq_empty(m->q)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, -@@ -1072,14 +1078,14 @@ apr_status_t h2_mplx_idle(h2_mplx *m) - h2_beam_is_closed(stream->output), - (long)h2_beam_get_buffered(stream->output)); - h2_ihash_add(m->streams, stream); -- check_data_for(m, stream, 1); -+ mst_check_data_for(m, stream, 1); - stream->out_checked = 1; - status = APR_EAGAIN; - } - } - } - } -- register_if_needed(m); -+ ms_register_if_needed(m, 1); - - H2_MPLX_LEAVE(m); - return status; -@@ -1089,14 +1095,13 @@ apr_status_t h2_mplx_idle(h2_mplx *m) - * mplx master events dispatching - ******************************************************************************/ - --int h2_mplx_has_master_events(h2_mplx *m) -+int h2_mplx_m_has_master_events(h2_mplx *m) - { - return apr_atomic_read32(&m->event_pending) > 0; - } - --apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, -- stream_ev_callback *on_resume, -- void *on_ctx) -+apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume, -+ void *on_ctx) - { - h2_stream *stream; - int n, id; -@@ -1106,8 +1111,8 @@ apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, - apr_atomic_set32(&m->event_pending, 0); - - /* update input windows for streams */ -- h2_ihash_iter(m->streams, report_consumption_iter, m); -- purge_streams(m, 1); -+ h2_ihash_iter(m->streams, m_report_consumption_iter, m); -+ m_purge_streams(m, 1); - - n = h2_ififo_count(m->readyq); - while (n > 0 -@@ -1122,13 +1127,13 @@ apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, - return APR_SUCCESS; - } - --apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream) -+apr_status_t h2_mplx_m_keep_active(h2_mplx *m, h2_stream *stream) - { -- check_data_for(m, stream, 0); -+ mst_check_data_for(m, stream, 0); - return APR_SUCCESS; - } - --int h2_mplx_awaits_data(h2_mplx *m) -+int h2_mplx_m_awaits_data(h2_mplx *m) - { - int waiting = 1; - -@@ -1145,7 +1150,7 @@ int h2_mplx_awaits_data(h2_mplx *m) - return waiting; - } - --apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id) -+apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id) - { - h2_stream *stream; - apr_status_t status = APR_SUCCESS; -@@ -1153,7 +1158,7 @@ apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id) - H2_MPLX_ENTER_ALWAYS(m); - stream = h2_ihash_get(m->streams, stream_id); - if (stream && stream->task) { -- status = mplx_be_annoyed(m); -+ status = m_be_annoyed(m); - } - H2_MPLX_LEAVE(m); - return status; -diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h -index 8a4f63f..c61629d 100644 ---- a/modules/http2/h2_mplx.h -+++ b/modules/http2/h2_mplx.h -@@ -31,8 +31,10 @@ - * queued in the multiplexer. If a task thread tries to write more - * data, it is blocked until space becomes available. - * -- * Writing input is never blocked. In order to use flow control on the input, -- * the mplx can be polled for input data consumption. 
-+ * Naming Convention: -+ * "h2_mplx_m_" are methods only to be called by the main connection -+ * "h2_mplx_s_" are method only to be called by a secondary connection -+ * "h2_mplx_t_" are method only to be called by a task handler (can be master or secondary) - */ - - struct apr_pool_t; -@@ -88,25 +90,23 @@ struct h2_mplx { - apr_size_t stream_max_mem; - - apr_pool_t *spare_io_pool; -- apr_array_header_t *spare_slaves; /* spare slave connections */ -+ apr_array_header_t *spare_secondary; /* spare secondary connections */ - - struct h2_workers *workers; - }; - -- -- - /******************************************************************************* -- * Object lifecycle and information. -+ * From the main connection processing: h2_mplx_m_* - ******************************************************************************/ - --apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s); -+apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s); - - /** - * Create the multiplexer for the given HTTP2 session. - * Implicitly has reference count 1. - */ --h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *master, -- struct h2_workers *workers); -+h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *master, -+ struct h2_workers *workers); - - /** - * Decreases the reference counter of this mplx and waits for it -@@ -116,26 +116,14 @@ h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *master, - * @param m the mplx to be released and destroyed - * @param wait condition var to wait on for ref counter == 0 - */ --void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait); -- --apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask); -- --void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask); -+void h2_mplx_m_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait); - - /** - * Shut down the multiplexer gracefully. Will no longer schedule new streams - * but let the ongoing ones finish normally. - * @return the highest stream id being/been processed - */ --int h2_mplx_shutdown(h2_mplx *m); -- --int h2_mplx_is_busy(h2_mplx *m); -- --/******************************************************************************* -- * IO lifetime of streams. -- ******************************************************************************/ -- --struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id); -+int h2_mplx_m_shutdown(h2_mplx *m); - - /** - * Notifies mplx that a stream has been completely handled on the main -@@ -144,20 +132,16 @@ struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id); - * @param m the mplx itself - * @param stream the stream ready for cleanup - */ --apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream); -+apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, struct h2_stream *stream); - - /** - * Waits on output data from any stream in this session to become available. - * Returns APR_TIMEUP if no data arrived in the given time. - */ --apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, -- struct apr_thread_cond_t *iowait); -- --apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream); -+apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout, -+ struct apr_thread_cond_t *iowait); - --/******************************************************************************* -- * Stream processing. 
-- ******************************************************************************/ -+apr_status_t h2_mplx_m_keep_active(h2_mplx *m, struct h2_stream *stream); - - /** - * Process a stream request. -@@ -168,8 +152,8 @@ apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream); - * @param cmp the stream priority compare function - * @param ctx context data for the compare function - */ --apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, -- h2_stream_pri_cmp *cmp, void *ctx); -+apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream, -+ h2_stream_pri_cmp *cmp, void *ctx); - - /** - * Stream priorities have changed, reschedule pending requests. -@@ -178,7 +162,7 @@ apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, - * @param cmp the stream priority compare function - * @param ctx context data for the compare function - */ --apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx); -+apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx); - - typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream); - -@@ -186,7 +170,7 @@ typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream); - * Check if the multiplexer has events for the master connection pending. - * @return != 0 iff there are events pending - */ --int h2_mplx_has_master_events(h2_mplx *m); -+int h2_mplx_m_has_master_events(h2_mplx *m); - - /** - * Dispatch events for the master connection, such as -@@ -194,108 +178,46 @@ int h2_mplx_has_master_events(h2_mplx *m); - * @param on_resume new output data has arrived for a suspended stream - * @param ctx user supplied argument to invocation. - */ --apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, -- stream_ev_callback *on_resume, -- void *ctx); -+apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume, -+ void *ctx); - --int h2_mplx_awaits_data(h2_mplx *m); -+int h2_mplx_m_awaits_data(h2_mplx *m); - - typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx); - --apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx); -+apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx); - --apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id); -- --/******************************************************************************* -- * Output handling of streams. -- ******************************************************************************/ -+apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id); - - /** -- * Opens the output for the given stream with the specified response. -+ * Master connection has entered idle mode. -+ * @param m the mplx instance of the master connection -+ * @return != SUCCESS iff connection should be terminated - */ --apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id, -- struct h2_bucket_beam *beam); -+apr_status_t h2_mplx_m_idle(h2_mplx *m); - - /******************************************************************************* -- * h2_mplx list Manipulation. 
-+ * From a secondary connection processing: h2_mplx_s_* - ******************************************************************************/ -+apr_status_t h2_mplx_s_pop_task(h2_mplx *m, struct h2_task **ptask); -+void h2_mplx_s_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask); - --/** -- * The magic pointer value that indicates the head of a h2_mplx list -- * @param b The mplx list -- * @return The magic pointer value -- */ --#define H2_MPLX_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_mplx, link) -- --/** -- * Determine if the mplx list is empty -- * @param b The list to check -- * @return true or false -- */ --#define H2_MPLX_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_mplx, link) -- --/** -- * Return the first mplx in a list -- * @param b The list to query -- * @return The first mplx in the list -- */ --#define H2_MPLX_LIST_FIRST(b) APR_RING_FIRST(b) -- --/** -- * Return the last mplx in a list -- * @param b The list to query -- * @return The last mplx int he list -- */ --#define H2_MPLX_LIST_LAST(b) APR_RING_LAST(b) -- --/** -- * Insert a single mplx at the front of a list -- * @param b The list to add to -- * @param e The mplx to insert -- */ --#define H2_MPLX_LIST_INSERT_HEAD(b, e) do { \ --h2_mplx *ap__b = (e); \ --APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link); \ --} while (0) -- --/** -- * Insert a single mplx at the end of a list -- * @param b The list to add to -- * @param e The mplx to insert -- */ --#define H2_MPLX_LIST_INSERT_TAIL(b, e) do { \ --h2_mplx *ap__b = (e); \ --APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \ --} while (0) -+/******************************************************************************* -+ * From a h2_task owner: h2_mplx_s_* -+ * (a task is transfered from master to secondary connection and back in -+ * its normal lifetime). -+ ******************************************************************************/ - - /** -- * Get the next mplx in the list -- * @param e The current mplx -- * @return The next mplx -- */ --#define H2_MPLX_NEXT(e) APR_RING_NEXT((e), link) --/** -- * Get the previous mplx in the list -- * @param e The current mplx -- * @return The previous mplx -+ * Opens the output for the given stream with the specified response. - */ --#define H2_MPLX_PREV(e) APR_RING_PREV((e), link) -+apr_status_t h2_mplx_t_out_open(h2_mplx *mplx, int stream_id, -+ struct h2_bucket_beam *beam); - - /** -- * Remove a mplx from its list -- * @param e The mplx to remove -+ * Get the stream that belongs to the given task. - */ --#define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link) -- --/******************************************************************************* -- * h2_mplx DoS protection -- ******************************************************************************/ -+struct h2_stream *h2_mplx_t_stream_get(h2_mplx *m, struct h2_task *task); - --/** -- * Master connection has entered idle mode. -- * @param m the mplx instance of the master connection -- * @return != SUCCESS iff connection should be terminated -- */ --apr_status_t h2_mplx_idle(h2_mplx *m); - - #endif /* defined(__mod_h2__h2_mplx__) */ -diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c -index 6394502..202d560 100644 ---- a/modules/http2/h2_request.c -+++ b/modules/http2/h2_request.c -@@ -288,6 +288,9 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) - if (r->method_number == M_GET && r->method[0] == 'H') { - r->header_only = 1; - } -+ r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0", -+ req->method, req->path ? 
req->path : ""); -+ r->headers_in = apr_table_clone(r->pool, req->headers); - - rpath = (req->path ? req->path : ""); - ap_parse_uri(r, rpath); -@@ -304,7 +307,9 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) - */ - r->hostname = NULL; - ap_update_vhost_from_headers(r); -- -+ r->protocol = "HTTP/2.0"; -+ r->proto_num = HTTP_VERSION(2, 0); -+ - /* we may have switched to another server */ - r->per_dir_config = r->server->lookup_defaults; - -diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c -index de54ac7..d657fce 100644 ---- a/modules/http2/h2_session.c -+++ b/modules/http2/h2_session.c -@@ -106,7 +106,7 @@ static int rst_unprocessed_stream(h2_stream *stream, void *ctx) - - static void cleanup_unprocessed_streams(h2_session *session) - { -- h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session); -+ h2_mplx_m_stream_do(session->mplx, rst_unprocessed_stream, session); - } - - static h2_stream *h2_session_open_stream(h2_session *session, int stream_id, -@@ -397,7 +397,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, - else { - /* A stream reset on a request it sent us. Could happen in a browser - * when the user navigates away or cancels loading - maybe. */ -- h2_mplx_client_rst(session->mplx, frame->hd.stream_id); -+ h2_mplx_m_client_rst(session->mplx, frame->hd.stream_id); - ++session->streams_reset; - } - break; -@@ -467,7 +467,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, - } - - static int h2_session_continue_data(h2_session *session) { -- if (h2_mplx_has_master_events(session->mplx)) { -+ if (h2_mplx_m_has_master_events(session->mplx)) { - return 0; - } - if (h2_conn_io_needs_flush(&session->io)) { -@@ -729,7 +729,7 @@ static apr_status_t h2_session_shutdown(h2_session *session, int error, - * Remove all streams greater than this number without submitting - * a RST_STREAM frame, since that should be clear from the GOAWAY - * we send. */ -- session->local.accepted_max = h2_mplx_shutdown(session->mplx); -+ session->local.accepted_max = h2_mplx_m_shutdown(session->mplx); - session->local.error = error; - } - else { -@@ -779,7 +779,7 @@ static apr_status_t session_cleanup(h2_session *session, const char *trigger) - } - - transit(session, trigger, H2_SESSION_ST_CLEANUP); -- h2_mplx_release_and_join(session->mplx, session->iowait); -+ h2_mplx_m_release_and_join(session->mplx, session->iowait); - session->mplx = NULL; - - ap_assert(session->ngh2); -@@ -800,7 +800,7 @@ static apr_status_t session_pool_cleanup(void *data) - /* if the session is still there, now is the last chance - * to perform cleanup. Normally, cleanup should have happened - * earlier in the connection pre_close. Main reason is that -- * any ongoing requests on slave connections might still access -+ * any ongoing requests on secondary connections might still access - * data which has, at this time, already been freed. An example - * is mod_ssl that uses request hooks. 
*/ - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, -@@ -893,7 +893,7 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec * - session->monitor->on_state_event = on_stream_state_event; - session->monitor->on_event = on_stream_event; - -- session->mplx = h2_mplx_create(c, s, session->pool, workers); -+ session->mplx = h2_mplx_m_create(c, s, session->pool, workers); - - /* connection input filter that feeds the session */ - session->cin = h2_filter_cin_create(session); -@@ -1552,7 +1552,7 @@ static void h2_session_in_flush(h2_session *session) - if (stream) { - ap_assert(!stream->scheduled); - if (h2_stream_prep_processing(stream) == APR_SUCCESS) { -- h2_mplx_process(session->mplx, stream, stream_pri_cmp, session); -+ h2_mplx_m_process(session->mplx, stream, stream_pri_cmp, session); - } - else { - h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); -@@ -1824,7 +1824,7 @@ static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg) - session->open_streams); - h2_conn_io_flush(&session->io); - if (session->open_streams > 0) { -- if (h2_mplx_awaits_data(session->mplx)) { -+ if (h2_mplx_m_awaits_data(session->mplx)) { - /* waiting for at least one stream to produce data */ - transit(session, "no io", H2_SESSION_ST_WAIT); - } -@@ -1983,7 +1983,7 @@ static void on_stream_state_enter(void *ctx, h2_stream *stream) - break; - case H2_SS_CLEANUP: - nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL); -- h2_mplx_stream_cleanup(session->mplx, stream); -+ h2_mplx_m_stream_cleanup(session->mplx, stream); - break; - default: - break; -@@ -2073,7 +2073,7 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev, - static apr_status_t dispatch_master(h2_session *session) { - apr_status_t status; - -- status = h2_mplx_dispatch_master_events(session->mplx, -+ status = h2_mplx_m_dispatch_master_events(session->mplx, - on_stream_resume, session); - if (status == APR_EAGAIN) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c, -@@ -2175,7 +2175,7 @@ apr_status_t h2_session_process(h2_session *session, int async) - session->have_read = 1; - } - else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) { -- status = h2_mplx_idle(session->mplx); -+ status = h2_mplx_m_idle(session->mplx); - if (status == APR_EAGAIN) { - break; - } -@@ -2205,7 +2205,7 @@ apr_status_t h2_session_process(h2_session *session, int async) - /* We wait in smaller increments, using a 1 second timeout. - * That gives us the chance to check for MPMQ_STOPPING often. 
- */ -- status = h2_mplx_idle(session->mplx); -+ status = h2_mplx_m_idle(session->mplx); - if (status == APR_EAGAIN) { - break; - } -@@ -2319,7 +2319,7 @@ apr_status_t h2_session_process(h2_session *session, int async) - "h2_session: wait for data, %ld micros", - (long)session->wait_us); - } -- status = h2_mplx_out_trywait(session->mplx, session->wait_us, -+ status = h2_mplx_m_out_trywait(session->mplx, session->wait_us, - session->iowait); - if (status == APR_SUCCESS) { - session->wait_us = 0; -@@ -2356,7 +2356,7 @@ apr_status_t h2_session_process(h2_session *session, int async) - dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL); - } - if (session->reprioritize) { -- h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session); -+ h2_mplx_m_reprioritize(session->mplx, stream_pri_cmp, session); - session->reprioritize = 0; - } - } -diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h -index 1bf6f05..4f74b56 100644 ---- a/modules/http2/h2_session.h -+++ b/modules/http2/h2_session.h -@@ -132,7 +132,7 @@ typedef struct h2_session { - const char *last_status_msg; /* the one already reported */ - - struct h2_iqueue *in_pending; /* all streams with input pending */ -- struct h2_iqueue *in_process; /* all streams ready for processing on slave */ -+ struct h2_iqueue *in_process; /* all streams ready for processing on a secondary */ - - } h2_session; - -diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c -index 4603513..eb61add 100644 ---- a/modules/http2/h2_stream.c -+++ b/modules/http2/h2_stream.c -@@ -911,7 +911,7 @@ apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen, - - if (status == APR_EAGAIN) { - /* TODO: ugly, someone needs to retrieve the response first */ -- h2_mplx_keep_active(stream->session->mplx, stream); -+ h2_mplx_m_keep_active(stream->session->mplx, stream); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - H2_STRM_MSG(stream, "prep, response eagain")); - return status; -diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c -index d610895..46d4a57 100644 ---- a/modules/http2/h2_task.c -+++ b/modules/http2/h2_task.c -@@ -86,7 +86,7 @@ static apr_status_t open_output(h2_task *task) - task->request->authority, - task->request->path); - task->output.opened = 1; -- return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam); -+ return h2_mplx_t_out_open(task->mplx, task->stream_id, task->output.beam); - } - - static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block) -@@ -126,8 +126,8 @@ static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block) - * request_rec out filter chain) into the h2_mplx for further sending - * on the master connection. 
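Taken together, the renames in this patch give the h2_mplx API a prefix that encodes which thread may call each function. This is inferred from the call sites in the diff, not from an upstream header comment, so treat it as a reading aid:

    /* Inferred call-site convention for the renamed h2_mplx API:
     *
     *   h2_mplx_m_*  master (main) connection thread
     *                (h2_mplx_m_process, h2_mplx_m_idle, h2_mplx_m_shutdown)
     *   h2_mplx_s_*  worker slot threads
     *                (h2_mplx_s_pop_task, h2_mplx_s_task_done)
     *   h2_mplx_t_*  task code running on a secondary connection
     *                (h2_mplx_t_out_open, h2_mplx_t_stream_get)
     *
     * The prefix makes the locking discipline auditable at each call site.
     */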
- */ --static apr_status_t slave_out(h2_task *task, ap_filter_t* f, -- apr_bucket_brigade* bb) -+static apr_status_t secondary_out(h2_task *task, ap_filter_t* f, -+ apr_bucket_brigade* bb) - { - apr_bucket *b; - apr_status_t rv = APR_SUCCESS; -@@ -175,7 +175,7 @@ send: - if (APR_SUCCESS == rv) { - /* could not write all, buffer the rest */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405) -- "h2_slave_out(%s): saving brigade", task->id); -+ "h2_secondary_out(%s): saving brigade", task->id); - ap_assert(NULL); - rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool); - flush = 1; -@@ -189,7 +189,7 @@ send: - } - out: - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c, -- "h2_slave_out(%s): slave_out leave", task->id); -+ "h2_secondary_out(%s): secondary_out leave", task->id); - return rv; - } - -@@ -202,14 +202,14 @@ static apr_status_t output_finish(h2_task *task) - } - - /******************************************************************************* -- * task slave connection filters -+ * task secondary connection filters - ******************************************************************************/ - --static apr_status_t h2_filter_slave_in(ap_filter_t* f, -- apr_bucket_brigade* bb, -- ap_input_mode_t mode, -- apr_read_type_e block, -- apr_off_t readbytes) -+static apr_status_t h2_filter_secondary_in(ap_filter_t* f, -+ apr_bucket_brigade* bb, -+ ap_input_mode_t mode, -+ apr_read_type_e block, -+ apr_off_t readbytes) - { - h2_task *task; - apr_status_t status = APR_SUCCESS; -@@ -224,7 +224,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, - - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, -- "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld", -+ "h2_secondary_in(%s): read, mode=%d, block=%d, readbytes=%ld", - task->id, mode, block, (long)readbytes); - } - -@@ -254,7 +254,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, - /* Get more input data for our request. */ - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, -- "h2_slave_in(%s): get more data from mplx, block=%d, " -+ "h2_secondary_in(%s): get more data from mplx, block=%d, " - "readbytes=%ld", task->id, block, (long)readbytes); - } - if (task->input.beam) { -@@ -267,7 +267,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, - - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c, -- "h2_slave_in(%s): read returned", task->id); -+ "h2_secondary_in(%s): read returned", task->id); - } - if (APR_STATUS_IS_EAGAIN(status) - && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) { -@@ -306,7 +306,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, - if (APR_BRIGADE_EMPTY(task->input.bb)) { - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, -- "h2_slave_in(%s): no data", task->id); -+ "h2_secondary_in(%s): no data", task->id); - } - return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF; - } -@@ -334,7 +334,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, - buffer[len] = 0; - if (trace1) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, -- "h2_slave_in(%s): getline: %s", -+ "h2_secondary_in(%s): getline: %s", - task->id, buffer); - } - } -@@ -344,7 +344,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, - * to support it. Seems to work. 
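H2_SECONDARY_IN above must honor every ap_input_mode_t a consumer may pass, mapping an empty non-blocking read to APR_EAGAIN and refusing modes it cannot support. For reference, this is the minimal shape of an httpd input filter; a pass-through sketch (the filter name is invented):

    #include "httpd.h"
    #include "util_filter.h"

    /* Pass-through input filter: hand the read to the next filter. */
    static apr_status_t my_in_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                                     ap_input_mode_t mode,
                                     apr_read_type_e block,
                                     apr_off_t readbytes)
    {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    static void register_my_filter(void)
    {
        ap_register_input_filter("MY_PASSTHROUGH_IN", my_in_filter,
                                 NULL, AP_FTYPE_CONNECTION);
    }

A real filter like h2_filter_secondary_in additionally has to distinguish AP_MODE_GETLINE from AP_MODE_READBYTES, which accounts for most of its bulk.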
*/ - ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c, - APLOGNO(03472) -- "h2_slave_in(%s), unsupported READ mode %d", -+ "h2_secondary_in(%s), unsupported READ mode %d", - task->id, mode); - status = APR_ENOTIMPL; - } -@@ -352,19 +352,19 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, - if (trace1) { - apr_brigade_length(bb, 0, &bblen); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, -- "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen); -+ "h2_secondary_in(%s): %ld data bytes", task->id, (long)bblen); - } - return status; - } - --static apr_status_t h2_filter_slave_output(ap_filter_t* filter, -- apr_bucket_brigade* brigade) -+static apr_status_t h2_filter_secondary_output(ap_filter_t* filter, -+ apr_bucket_brigade* brigade) - { - h2_task *task = h2_ctx_get_task(filter->c); - apr_status_t status; - - ap_assert(task); -- status = slave_out(task, filter, brigade); -+ status = secondary_out(task, filter, brigade); - if (status != APR_SUCCESS) { - h2_task_rst(task, H2_ERR_INTERNAL_ERROR); - } -@@ -456,9 +456,9 @@ void h2_task_register_hooks(void) - ap_hook_process_connection(h2_task_process_conn, - NULL, NULL, APR_HOOK_FIRST); - -- ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in, -+ ap_register_input_filter("H2_SECONDARY_IN", h2_filter_secondary_in, - NULL, AP_FTYPE_NETWORK); -- ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output, -+ ap_register_output_filter("H2_SECONDARY_OUT", h2_filter_secondary_output, - NULL, AP_FTYPE_NETWORK); - ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1, - NULL, AP_FTYPE_NETWORK); -@@ -492,15 +492,15 @@ static int h2_task_pre_conn(conn_rec* c, void *arg) - (void)arg; - if (ctx->task) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, -- "h2_slave(%s), pre_connection, adding filters", c->log_id); -- ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c); -+ "h2_secondary(%s), pre_connection, adding filters", c->log_id); -+ ap_add_input_filter("H2_SECONDARY_IN", NULL, NULL, c); - ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c); -- ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c); -+ ap_add_output_filter("H2_SECONDARY_OUT", NULL, NULL, c); - } - return OK; - } - --h2_task *h2_task_create(conn_rec *slave, int stream_id, -+h2_task *h2_task_create(conn_rec *secondary, int stream_id, - const h2_request *req, h2_mplx *m, - h2_bucket_beam *input, - apr_interval_time_t timeout, -@@ -509,10 +509,10 @@ h2_task *h2_task_create(conn_rec *slave, int stream_id, - apr_pool_t *pool; - h2_task *task; - -- ap_assert(slave); -+ ap_assert(secondary); - ap_assert(req); - -- apr_pool_create(&pool, slave->pool); -+ apr_pool_create(&pool, secondary->pool); - apr_pool_tag(pool, "h2_task"); - task = apr_pcalloc(pool, sizeof(h2_task)); - if (task == NULL) { -@@ -520,7 +520,7 @@ h2_task *h2_task_create(conn_rec *slave, int stream_id, - } - task->id = "000"; - task->stream_id = stream_id; -- task->c = slave; -+ task->c = secondary; - task->mplx = m; - task->pool = pool; - task->request = req; -@@ -559,7 +559,7 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id) - * - * Each conn_rec->id is supposed to be unique at a point in time. Since - * some modules (and maybe external code) uses this id as an identifier -- * for the request_rec they handle, it needs to be unique for slave -+ * for the request_rec they handle, it needs to be unique for secondary - * connections also. 
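The comment resumes below with the original scheme for secondary connection ids: shift the master connection id into the upper bits and keep the stream id in the lower ones, roomy on 64-bit builds but cramped on 32-bit. A sketch of that packing (the field widths are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a master connection id and a stream id into one 64-bit id. */
    static uint64_t secondary_conn_id(uint64_t master_id, uint32_t stream_id)
    {
        return (master_id << 32) | stream_id;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)secondary_conn_id(7, 5));
        return 0;
    }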
- * - * The MPM module assigns the connection ids and mod_unique_id is using -@@ -567,7 +567,7 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id) - * works for HTTP/1.x, the parallel execution of several requests per - * connection will generate duplicate identifiers on load. - * -- * The original implementation for slave connection identifiers used -+ * The original implementation for secondary connection identifiers used - * to shift the master connection id up and assign the stream id to the - * lower bits. This was cramped on 32 bit systems, but on 64bit there was - * enough space. -@@ -599,7 +599,7 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id) - h2_ctx_create_for(c, task); - apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id); - -- h2_slave_run_pre_connection(c, ap_get_conn_socket(c)); -+ h2_secondary_run_pre_connection(c, ap_get_conn_socket(c)); - - task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc); - if (task->request->serialize) { -@@ -707,7 +707,7 @@ static int h2_task_process_conn(conn_rec* c) - } - else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, -- "slave_conn(%ld): has no task", c->id); -+ "secondary_conn(%ld): has no task", c->id); - } - return DECLINED; - } -diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h -index 6e8775f..efdbff6 100644 ---- a/modules/http2/h2_task.h -+++ b/modules/http2/h2_task.h -@@ -90,7 +90,7 @@ struct h2_task { - apr_bucket *eor; - }; - --h2_task *h2_task_create(conn_rec *slave, int stream_id, -+h2_task *h2_task_create(conn_rec *secondary, int stream_id, - const h2_request *req, struct h2_mplx *m, - struct h2_bucket_beam *input, - apr_interval_time_t timeout, -diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c -index 52f1a70..96c20a8 100644 ---- a/modules/http2/h2_workers.c -+++ b/modules/http2/h2_workers.c -@@ -155,7 +155,7 @@ static apr_status_t slot_pull_task(h2_slot *slot, h2_mplx *m) - { - apr_status_t rv; - -- rv = h2_mplx_pop_task(m, &slot->task); -+ rv = h2_mplx_s_pop_task(m, &slot->task); - if (slot->task) { - /* Ok, we got something to give back to the worker for execution. - * If we still have idle workers, we let the worker be sticky, -@@ -234,10 +234,10 @@ static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx) - * mplx the opportunity to give us back a new task right away. 
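The "sticky" behavior described above shows in the code just below: a worker reports its finished task and, via the out-parameter, may receive the next one in the same call instead of returning to the idle pool. A toy version of that hand-back-and-pull pattern (the queue and names are invented, not the h2_workers API):

    #include <stddef.h>

    typedef struct { int id; } task_t;

    typedef struct {
        task_t *items[16];
        size_t head, tail;
    } queue_t;

    static task_t *queue_pop(queue_t *q)
    {
        return (q->head == q->tail) ? NULL : q->items[q->head++];
    }

    /* Report 'done'; if the caller stays sticky, pull the next task. */
    static void task_done(queue_t *q, task_t *done, task_t **pnext)
    {
        (void)done;                       /* release 'done' here */
        if (pnext)
            *pnext = queue_pop(q);        /* keep the worker busy */
    }

    static void worker_run(queue_t *q, int sticks)
    {
        task_t *task = queue_pop(q);
        while (task) {
            /* ... process task ... */
            task_done(q, task, (--sticks > 0) ? &task : NULL);
            if (sticks <= 0)
                break;                    /* back to the idle pool */
        }
    }

    int main(void)
    {
        queue_t q = { {0}, 0, 0 };
        task_t a = { 1 }, b = { 2 };
        q.items[q.tail++] = &a;
        q.items[q.tail++] = &b;
        worker_run(&q, 2);
        return 0;
    }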
- */ - if (!slot->aborted && (--slot->sticks > 0)) { -- h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task); -+ h2_mplx_s_task_done(slot->task->mplx, slot->task, &slot->task); - } - else { -- h2_mplx_task_done(slot->task->mplx, slot->task, NULL); -+ h2_mplx_s_task_done(slot->task->mplx, slot->task, NULL); - slot->task = NULL; - } - } -diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c -index 5664f39..9f087ab 100644 ---- a/modules/http2/mod_http2.c -+++ b/modules/http2/mod_http2.c -@@ -237,7 +237,7 @@ static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s, - if (ctx) { - if (r) { - if (ctx->task) { -- h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id); -+ h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task); - if (stream && stream->push_policy != H2_PUSH_NONE) { - return "on"; - } -@@ -271,7 +271,7 @@ static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s, - { - if (ctx) { - if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) { -- h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id); -+ h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task); - if (stream) { - return apr_itoa(p, stream->initiated_on); - } diff --git a/CVE-2020-9490.patch b/CVE-2020-9490.patch deleted file mode 100644 index d0ef962..0000000 --- a/CVE-2020-9490.patch +++ /dev/null @@ -1,394 +0,0 @@ -From f1e4032670b82a84a469f6506de9052fd9df54f8 Mon Sep 17 00:00:00 2001 -From: Stefan Eissing -Date: Wed, 29 Jul 2020 12:15:58 +0000 -Subject: [PATCH] *) mod_http2: remote support for abandoned http-wg draft - . - -git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1880395 13f79535-47bb-0310-9956-ffa450edef68 ---- - modules/http2/h2_push.c | 255 ++++--------------------------------- - modules/http2/h2_push.h | 54 +++++--- - 2 files changed, 64 insertions(+), 245 deletions(-) - -diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c -index 60488cf..dc21e1e 100644 ---- a/modules/http2/h2_push.c -+++ b/modules/http2/h2_push.c -@@ -464,33 +464,6 @@ apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req, - return NULL; - } - --/******************************************************************************* -- * push diary -- * -- * - The push diary keeps track of resources already PUSHed via HTTP/2 on this -- * connection. It records a hash value from the absolute URL of the resource -- * pushed. -- * - Lacking openssl, it uses 'apr_hashfunc_default' for the value -- * - with openssl, it uses SHA256 to calculate the hash value -- * - whatever the method to generate the hash, the diary keeps a maximum of 64 -- * bits per hash, limiting the memory consumption to about -- * H2PushDiarySize * 8 -- * bytes. Entries are sorted by most recently used and oldest entries are -- * forgotten first. -- * - Clients can initialize/replace the push diary by sending a 'Cache-Digest' -- * header. Currently, this is the base64url encoded value of the cache digest -- * as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ -- * This draft can be expected to evolve and the definition of the header -- * will be added there and refined. -- * - The cache digest header is a Golomb Coded Set of hash values, but it may -- * limit the amount of bits per hash value even further. 
For a good description -- * of GCS, read here: -- * http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters -- * - The means that the push diary might be initialized with hash values of much -- * less than 64 bits, leading to more false positives, but smaller digest size. -- ******************************************************************************/ -- -- - #define GCSLOG_LEVEL APLOG_TRACE1 - - typedef struct h2_push_diary_entry { -@@ -617,38 +590,48 @@ static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash) - return -1; - } - --static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx) -+static void move_to_last(h2_push_diary *diary, apr_size_t idx) - { - h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts; - h2_push_diary_entry e; -- apr_size_t lastidx = diary->entries->nelts-1; -+ int lastidx; - -+ /* Move an existing entry to the last place */ -+ if (diary->entries->nelts <= 0) -+ return; -+ - /* move entry[idx] to the end */ -+ lastidx = diary->entries->nelts - 1; - if (idx < lastidx) { - e = entries[idx]; -- memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx)); -+ memmove(entries+idx, entries+idx+1, sizeof(h2_push_diary_entry) * (lastidx - idx)); - entries[lastidx] = e; - } -- return &entries[lastidx]; - } - --static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e) -+static void remove_first(h2_push_diary *diary) - { -- h2_push_diary_entry *ne; -+ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts; -+ int lastidx; - -- if (diary->entries->nelts < diary->N) { -- /* append a new diary entry at the end */ -- APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e; -- ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry); -+ /* move remaining entries to index 0 */ -+ lastidx = diary->entries->nelts - 1; -+ if (lastidx > 0) { -+ --diary->entries->nelts; -+ memmove(entries, entries+1, sizeof(h2_push_diary_entry) * diary->entries->nelts); - } -- else { -- /* replace content with new digest. 
keeps memory usage constant once diary is full */ -- ne = move_to_last(diary, 0); -- *ne = *e; -+} -+ -+static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e) -+{ -+ while (diary->entries->nelts >= diary->N) { -+ remove_first(diary); - } -+ /* append a new diary entry at the end */ -+ APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e; - /* Intentional no APLOGNO */ - ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool, -- "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash); -+ "push_diary_append: %"APR_UINT64_T_HEX_FMT, e->hash); - } - - apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes) -@@ -691,30 +674,12 @@ apr_array_header_t *h2_push_collect_update(h2_stream *stream, - const struct h2_request *req, - const struct h2_headers *res) - { -- h2_session *session = stream->session; -- const char *cache_digest = apr_table_get(req->headers, "Cache-Digest"); - apr_array_header_t *pushes; -- apr_status_t status; - -- if (cache_digest && session->push_diary) { -- status = h2_push_diary_digest64_set(session->push_diary, req->authority, -- cache_digest, stream->pool); -- if (status != APR_SUCCESS) { -- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, -- H2_SSSN_LOG(APLOGNO(03057), session, -- "push diary set from Cache-Digest: %s"), cache_digest); -- } -- } - pushes = h2_push_collect(stream->pool, req, stream->push_policy, res); - return h2_push_diary_update(stream->session, pushes); - } - --static apr_int32_t h2_log2inv(unsigned char log2) --{ -- return log2? (1 << log2) : 1; --} -- -- - typedef struct { - h2_push_diary *diary; - unsigned char log2p; -@@ -829,11 +794,6 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool, - apr_size_t hash_count; - - nelts = diary->entries->nelts; -- -- if (nelts > APR_UINT32_MAX) { -- /* should not happen */ -- return APR_ENOTIMPL; -- } - N = ceil_power_of_2(nelts); - log2n = h2_log2(N); - -@@ -895,166 +855,3 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool, - return APR_SUCCESS; - } - --typedef struct { -- h2_push_diary *diary; -- apr_pool_t *pool; -- unsigned char log2p; -- const unsigned char *data; -- apr_size_t datalen; -- apr_size_t offset; -- unsigned int bit; -- apr_uint64_t last_val; --} gset_decoder; -- --static int gset_decode_next_bit(gset_decoder *decoder) --{ -- if (++decoder->bit >= 8) { -- if (++decoder->offset >= decoder->datalen) { -- return -1; -- } -- decoder->bit = 0; -- } -- return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0; --} -- --static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash) --{ -- apr_uint64_t flex = 0, fixed = 0, delta; -- int i; -- -- /* read 1 bits until we encounter 0, then read log2n(diary-P) bits. -- * On a malformed bit-string, this will not fail, but produce results -- * which are pbly too large. Luckily, the diary will modulo the hash. 
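With this change the diary becomes a bounded FIFO: remove_first() shifts the remaining entries down one slot and h2_push_diary_append() evicts from the front until there is room, so memory stays capped at N entries once the diary is full. A standalone sketch of that eviction with toy types:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DIARY_N 4

    static uint64_t entries[DIARY_N];
    static int nelts;

    /* Drop the oldest entry by shifting the rest down one slot. */
    static void remove_first(void)
    {
        if (nelts > 0) {
            --nelts;
            memmove(entries, entries + 1, sizeof(entries[0]) * nelts);
        }
    }

    /* Append, evicting oldest entries so the diary never exceeds N. */
    static void diary_append(uint64_t hash)
    {
        while (nelts >= DIARY_N)
            remove_first();
        entries[nelts++] = hash;
    }

    int main(void)
    {
        int i;
        uint64_t h;
        for (h = 1; h <= 6; ++h)
            diary_append(h);
        for (i = 0; i < nelts; ++i)
            printf("%llu ", (unsigned long long)entries[i]);
        putchar('\n');                    /* prints: 3 4 5 6 */
        return 0;
    }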
-- */ -- while (1) { -- int bit = gset_decode_next_bit(decoder); -- if (bit == -1) { -- return APR_EINVAL; -- } -- if (!bit) { -- break; -- } -- ++flex; -- } -- -- for (i = 0; i < decoder->log2p; ++i) { -- int bit = gset_decode_next_bit(decoder); -- if (bit == -1) { -- return APR_EINVAL; -- } -- fixed = (fixed << 1) | bit; -- } -- -- delta = (flex << decoder->log2p) | fixed; -- *phash = delta + decoder->last_val; -- decoder->last_val = *phash; -- -- /* Intentional no APLOGNO */ -- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool, -- "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%" -- APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT, -- *phash, delta, (int)flex, fixed); -- -- return APR_SUCCESS; --} -- --/** -- * Initialize the push diary by a cache digest as described in -- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ -- * . -- * @param diary the diary to set the digest into -- * @param data the binary cache digest -- * @param len the length of the cache digest -- * @return APR_EINVAL if digest was not successfully parsed -- */ --apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority, -- const char *data, apr_size_t len) --{ -- gset_decoder decoder; -- unsigned char log2n, log2p; -- int N, i; -- apr_pool_t *pool = diary->entries->pool; -- h2_push_diary_entry e; -- apr_status_t status = APR_SUCCESS; -- -- if (len < 2) { -- /* at least this should be there */ -- return APR_EINVAL; -- } -- log2n = data[0]; -- log2p = data[1]; -- diary->mask_bits = log2n + log2p; -- if (diary->mask_bits > 64) { -- /* cannot handle */ -- return APR_ENOTIMPL; -- } -- -- /* whatever is in the digest, it replaces the diary entries */ -- apr_array_clear(diary->entries); -- if (!authority || !strcmp("*", authority)) { -- diary->authority = NULL; -- } -- else if (!diary->authority || strcmp(diary->authority, authority)) { -- diary->authority = apr_pstrdup(diary->entries->pool, authority); -- } -- -- N = h2_log2inv(log2n + log2p); -- -- decoder.diary = diary; -- decoder.pool = pool; -- decoder.log2p = log2p; -- decoder.data = (const unsigned char*)data; -- decoder.datalen = len; -- decoder.offset = 1; -- decoder.bit = 8; -- decoder.last_val = 0; -- -- diary->N = N; -- /* Determine effective N we use for storage */ -- if (!N) { -- /* a totally empty cache digest. someone tells us that she has no -- * entries in the cache at all. Use our own preferences for N+mask -- */ -- diary->N = diary->NMax; -- return APR_SUCCESS; -- } -- else if (N > diary->NMax) { -- /* Store not more than diary is configured to hold. We open us up -- * to DOS attacks otherwise. 
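gset_decode_next(), deleted above, is a textbook Golomb-Rice decoder: count 1-bits until a 0 for the unary quotient ("flex"), read log2p fixed bits for the remainder, then delta-accumulate against the previous value. A self-contained sketch of the same decoding, assuming MSB-first bit order (the cbit_mask table that fixes the real order is not shown in this hunk):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {                      /* MSB-first bit reader */
        const unsigned char *data;
        size_t len, pos;
        int bit;
    } bits_t;

    static int next_bit(bits_t *b)
    {
        int v;
        if (b->pos >= b->len)
            return -1;
        v = (b->data[b->pos] >> (7 - b->bit)) & 1;
        if (++b->bit == 8) { b->bit = 0; ++b->pos; }
        return v;
    }

    /* Decode one delta: unary quotient, then log2p-bit remainder. */
    static int gcs_next(bits_t *b, unsigned log2p, uint64_t *last)
    {
        uint64_t flex = 0, fixed = 0;
        unsigned i;
        int bit;
        while ((bit = next_bit(b)) == 1)
            ++flex;
        if (bit < 0)
            return -1;
        for (i = 0; i < log2p; ++i) {
            if ((bit = next_bit(b)) < 0)
                return -1;
            fixed = (fixed << 1) | (unsigned)bit;
        }
        *last += (flex << log2p) | fixed;
        return 0;
    }

    int main(void)
    {
        /* log2p = 2; deltas 3 ("011") and 5 ("1001") -> 0111001x -> 0x72 */
        const unsigned char buf[] = { 0x72 };
        bits_t b = { buf, sizeof buf, 0, 0 };
        uint64_t v = 0;
        while (gcs_next(&b, 2, &v) == 0)
            printf("%llu\n", (unsigned long long)v);  /* 3, then 8 */
        return 0;
    }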
*/ -- diary->N = diary->NMax; -- } -- -- /* Intentional no APLOGNO */ -- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, -- "h2_push_diary_digest_set: N=%d, log2n=%d, " -- "diary->mask_bits=%d, dec.log2p=%d", -- (int)diary->N, (int)log2n, diary->mask_bits, -- (int)decoder.log2p); -- -- for (i = 0; i < diary->N; ++i) { -- if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) { -- /* the data may have less than N values */ -- break; -- } -- h2_push_diary_append(diary, &e); -- } -- -- /* Intentional no APLOGNO */ -- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, -- "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d", -- (int)diary->entries->nelts, diary->mask_bits); -- return status; --} -- --apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority, -- const char *data64url, apr_pool_t *pool) --{ -- const char *data; -- apr_size_t len = h2_util_base64url_decode(&data, data64url, pool); -- /* Intentional no APLOGNO */ -- ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, -- "h2_push_diary_digest64_set: digest=%s, dlen=%d", -- data64url, (int)len); -- return h2_push_diary_digest_set(diary, authority, data, len); --} -- -diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h -index bc24e68..d061dd8 100644 ---- a/modules/http2/h2_push.h -+++ b/modules/http2/h2_push.h -@@ -35,6 +35,44 @@ typedef enum { - H2_PUSH_DIGEST_SHA256 - } h2_push_digest_type; - -+/******************************************************************************* -+ * push diary -+ * -+ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this -+ * connection. It records a hash value from the absolute URL of the resource -+ * pushed. -+ * - Lacking openssl, -+ * - with openssl, it uses SHA256 to calculate the hash value, otherwise it -+ * falls back to apr_hashfunc_default() -+ * - whatever the method to generate the hash, the diary keeps a maximum of 64 -+ * bits per hash, limiting the memory consumption to about -+ * H2PushDiarySize * 8 -+ * bytes. Entries are sorted by most recently used and oldest entries are -+ * forgotten first. -+ * - While useful by itself to avoid duplicated PUSHes on the same connection, -+ * the original idea was that clients provided a 'Cache-Digest' header with -+ * the values of *their own* cached resources. This was described in -+ * -+ * and some subsequent revisions that tweaked values but kept the overall idea. -+ * - The draft was abandoned by the IETF http-wg, as support from major clients, -+ * e.g. browsers, was lacking for various reasons. -+ * - For these reasons, mod_h2 abandoned its support for client supplied values -+ * but keeps the diary. It seems to provide value for applications using PUSH, -+ * is configurable in size and defaults to a very moderate amount of memory -+ * used. -+ * - The cache digest header is a Golomb Coded Set of hash values, but it may -+ * limit the amount of bits per hash value even further. For a good description -+ * of GCS, read here: -+ * -+ ******************************************************************************/ -+ -+ -+/* -+ * The push diary is based on the abandoned draft -+ * -+ * that describes how to use golomb filters. 
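The relocated comment block keeps the key sizing invariant: whichever hash function is used, only 64 bits per entry are stored, so a diary of H2PushDiarySize entries costs roughly H2PushDiarySize * 8 bytes (256 entries come to about 2 KiB). A sketch of truncating a SHA256 digest to such a 64-bit key, assuming OpenSSL; mod_http2's actual byte order and offset may differ:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <openssl/sha.h>

    /* Hash an absolute URL, keep the first 64 bits of the digest. */
    static uint64_t push_hash64(const char *url)
    {
        unsigned char md[SHA256_DIGEST_LENGTH];
        uint64_t h = 0;
        int i;
        SHA256((const unsigned char *)url, strlen(url), md);
        for (i = 0; i < 8; ++i)
            h = (h << 8) | md[i];
        return h;
    }

    int main(void)
    {
        printf("%016llx\n", (unsigned long long)
               push_hash64("https://example.org/style.css"));
        return 0;
    }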
-+ */ -+ - typedef struct h2_push_diary h2_push_diary; - - typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push); -@@ -101,20 +139,4 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p, - int maxP, const char *authority, - const char **pdata, apr_size_t *plen); - --/** -- * Initialize the push diary by a cache digest as described in -- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ -- * . -- * @param diary the diary to set the digest into -- * @param authority the authority to set the data for -- * @param data the binary cache digest -- * @param len the length of the cache digest -- * @return APR_EINVAL if digest was not successfully parsed -- */ --apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority, -- const char *data, apr_size_t len); -- --apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority, -- const char *data64url, apr_pool_t *pool); -- - #endif /* defined(__mod_h2__h2_push__) */ diff --git a/httpd-2.4.25-detect-systemd.patch b/httpd-2.4.43-detect-systemd.patch similarity index 54% rename from httpd-2.4.25-detect-systemd.patch rename to httpd-2.4.43-detect-systemd.patch index 718beae..540687f 100644 --- a/httpd-2.4.25-detect-systemd.patch +++ b/httpd-2.4.43-detect-systemd.patch @@ -1,25 +1,8 @@ -diff -uap httpd-2.4.25/configure.in.detectsystemd httpd-2.4.25/configure.in ---- httpd-2.4.25/configure.in.detectsystemd -+++ httpd-2.4.25/configure.in -@@ -234,6 +234,7 @@ - AC_MSG_NOTICE([Using external PCRE library from $PCRE_CONFIG]) - APR_ADDTO(PCRE_INCLUDES, [`$PCRE_CONFIG --cflags`]) - APR_ADDTO(PCRE_LIBS, [`$PCRE_CONFIG --libs`]) -+ APR_ADDTO(HTTPD_LIBS, [\$(PCRE_LIBS)]) - else - AC_MSG_ERROR([pcre-config for libpcre not found. PCRE is required and available from http://pcre.org/]) - fi -@@ -668,6 +676,7 @@ - APACHE_SUBST(BUILTIN_LIBS) - APACHE_SUBST(SHLIBPATH_VAR) - APACHE_SUBST(OS_SPECIFIC_VARS) -+APACHE_SUBST(HTTPD_LIBS) - - PRE_SHARED_CMDS='echo ""' - POST_SHARED_CMDS='echo ""' ---- httpd-2.4.25/Makefile.in.detectsystemd -+++ httpd-2.4.25/Makefile.in -@@ -4,7 +4,7 @@ +diff --git a/Makefile.in b/Makefile.in +index 0b088ac..9eeb5c7 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -4,7 +4,7 @@ CLEAN_SUBDIRS = test PROGRAM_NAME = $(progname) PROGRAM_SOURCES = modules.c @@ -28,3 +11,35 @@ diff -uap httpd-2.4.25/configure.in.detectsystemd httpd-2.4.25/configure.in PROGRAM_PRELINK = $(COMPILE) -c $(top_srcdir)/server/buildmark.c PROGRAM_DEPENDENCIES = \ server/libmain.la \ +diff --git a/acinclude.m4 b/acinclude.m4 +index 2a7e5d1..eb28321 100644 +--- a/acinclude.m4 ++++ b/acinclude.m4 +@@ -624,6 +624,7 @@ case $host in + if test "${ac_cv_header_systemd_sd_daemon_h}" = "no" || test -z "${SYSTEMD_LIBS}"; then + AC_MSG_WARN([Your system does not support systemd.]) + else ++ APR_ADDTO(HTTPD_LIBS, [$SYSTEMD_LIBS]) + AC_DEFINE(HAVE_SYSTEMD, 1, [Define if systemd is supported]) + fi + fi +diff --git a/configure.in b/configure.in +index 3618a5a..74a782b 100644 +--- a/configure.in ++++ b/configure.in +@@ -234,6 +234,7 @@ if test "$PCRE_CONFIG" != "false"; then + AC_MSG_NOTICE([Using external PCRE library from $PCRE_CONFIG]) + APR_ADDTO(PCRE_INCLUDES, [`$PCRE_CONFIG --cflags`]) + APR_ADDTO(PCRE_LIBS, [`$PCRE_CONFIG --libs`]) ++ APR_ADDTO(HTTPD_LIBS, [\$(PCRE_LIBS)]) + else + AC_MSG_ERROR([pcre-config for libpcre not found. 
PCRE is required and available from http://pcre.org/]) + fi +@@ -710,6 +711,7 @@ APACHE_SUBST(OS_DIR) + APACHE_SUBST(BUILTIN_LIBS) + APACHE_SUBST(SHLIBPATH_VAR) + APACHE_SUBST(OS_SPECIFIC_VARS) ++APACHE_SUBST(HTTPD_LIBS) + + PRE_SHARED_CMDS='echo ""' + POST_SHARED_CMDS='echo ""' diff --git a/httpd-2.4.43-gettid.patch b/httpd-2.4.43-gettid.patch new file mode 100644 index 0000000..f80b3a7 --- /dev/null +++ b/httpd-2.4.43-gettid.patch @@ -0,0 +1,93 @@ +From d4e5b6e1e5585d341d1e51f1ddc637c099111076 Mon Sep 17 00:00:00 2001 +From: Joe Orton +Date: Tue, 7 Jul 2020 09:48:01 +0100 +Subject: [PATCH] Check and use gettid() directly with glibc 2.30+. + +* configure.in: Check for gettid() and define HAVE_SYS_GETTID if + gettid() is only usable via syscall(). + +* server/log.c (log_tid): Use gettid() directly if available. +--- + configure.in | 14 +++++++++----- + server/log.c | 8 ++++++-- + 2 files changed, 15 insertions(+), 7 deletions(-) + +diff --git a/configure.in b/configure.in +index 423d58d4b9a..60cbf7b7f81 100644 +--- httpd-2.4.43/configure.in.gettid ++++ httpd-2.4.43/configure.in +@@ -478,7 +500,8 @@ + timegm \ + getpgid \ + fopen64 \ +-getloadavg ++getloadavg \ ++gettid + ) + + dnl confirm that a void pointer is large enough to store a long integer +@@ -489,16 +512,19 @@ + APR_ADDTO(HTTPD_LIBS, [-lselinux]) + ]) + +-AC_CACHE_CHECK([for gettid()], ac_cv_gettid, ++if test $ac_cv_func_gettid = no; then ++ # On Linux before glibc 2.30, gettid() is only usable via syscall() ++ AC_CACHE_CHECK([for gettid() via syscall], ap_cv_gettid, + [AC_TRY_RUN(#define _GNU_SOURCE + #include + #include + #include + int main(int argc, char **argv) { + pid_t t = syscall(SYS_gettid); return t == -1 ? 1 : 0; }, +-[ac_cv_gettid=yes], [ac_cv_gettid=no], [ac_cv_gettid=no])]) +-if test "$ac_cv_gettid" = "yes"; then +- AC_DEFINE(HAVE_GETTID, 1, [Define if you have gettid()]) ++ [ap_cv_gettid=yes], [ap_cv_gettid=no], [ap_cv_gettid=no])]) ++ if test "$ap_cv_gettid" = "yes"; then ++ AC_DEFINE(HAVE_SYS_GETTID, 1, [Define if you have gettid() via syscall()]) ++ fi + fi + + dnl ## Check for the tm_gmtoff field in struct tm to get the timezone diffs +--- httpd-2.4.43/server/log.c.gettid ++++ httpd-2.4.43/server/log.c +@@ -55,7 +55,7 @@ + #include "ap_mpm.h" + #include "ap_listen.h" + +-#if HAVE_GETTID ++#if HAVE_SYS_GETTID + #include + #include + #endif +@@ -625,14 +625,18 @@ + #if APR_HAS_THREADS + int result; + #endif +-#if HAVE_GETTID ++#if defined(HAVE_GETTID) || defined(HAVE_SYS_GETTID) + if (arg && *arg == 'g') { ++#ifdef HAVE_GETTID ++ pid_t tid = gettid(); ++#else + pid_t tid = syscall(SYS_gettid); ++#endif + if (tid == -1) + return 0; + return apr_snprintf(buf, buflen, "%"APR_PID_T_FMT, tid); + } +-#endif ++#endif /* HAVE_GETTID || HAVE_SYS_GETTID */ + #if APR_HAS_THREADS + if (ap_mpm_query(AP_MPMQ_IS_THREADED, &result) == APR_SUCCESS + && result != AP_MPMQ_NOT_SUPPORTED) +@@ -966,7 +970,7 @@ + #if APR_HAS_THREADS + field_start = len; + len += cpystrn(buf + len, ":tid ", buflen - len); +- item_len = log_tid(info, NULL, buf + len, buflen - len); ++ item_len = log_tid(info, "g", buf + len, buflen - len); + if (!item_len) + len = field_start; + else diff --git a/httpd-2.4.43-r1828172+.patch b/httpd-2.4.43-r1828172+.patch new file mode 100644 index 0000000..3487600 --- /dev/null +++ b/httpd-2.4.43-r1828172+.patch @@ -0,0 +1,1413 @@ +diff --git a/modules/generators/cgi_common.h b/modules/generators/cgi_common.h +new file mode 100644 +index 0000000..85c9685 +--- /dev/null ++++ b/modules/generators/cgi_common.h +@@ -0,0 
+1,359 @@ ++/* Licensed to the Apache Software Foundation (ASF) under one or more ++ * contributor license agreements. See the NOTICE file distributed with ++ * this work for additional information regarding copyright ownership. ++ * The ASF licenses this file to You under the Apache License, Version 2.0 ++ * (the "License"); you may not use this file except in compliance with ++ * the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ */ ++ ++#include "apr.h" ++#include "apr_strings.h" ++#include "apr_buckets.h" ++#include "apr_lib.h" ++#include "apr_poll.h" ++ ++#define APR_WANT_STRFUNC ++#define APR_WANT_MEMFUNC ++#include "apr_want.h" ++ ++#include "httpd.h" ++#include "util_filter.h" ++ ++static void discard_script_output(apr_bucket_brigade *bb) ++{ ++ apr_bucket *e; ++ const char *buf; ++ apr_size_t len; ++ ++ for (e = APR_BRIGADE_FIRST(bb); ++ e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e); ++ e = APR_BRIGADE_FIRST(bb)) ++ { ++ if (apr_bucket_read(e, &buf, &len, APR_BLOCK_READ)) { ++ break; ++ } ++ apr_bucket_delete(e); ++ } ++} ++ ++#ifdef WANT_CGI_BUCKET ++/* A CGI bucket type is needed to catch any output to stderr from the ++ * script; see PR 22030. */ ++static const apr_bucket_type_t bucket_type_cgi; ++ ++struct cgi_bucket_data { ++ apr_pollset_t *pollset; ++ request_rec *r; ++ apr_interval_time_t timeout; ++}; ++ ++/* Create a CGI bucket using pipes from script stdout 'out' ++ * and stderr 'err', for request 'r'. */ ++static apr_bucket *cgi_bucket_create(request_rec *r, ++ apr_interval_time_t timeout, ++ apr_file_t *out, apr_file_t *err, ++ apr_bucket_alloc_t *list) ++{ ++ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); ++ apr_status_t rv; ++ apr_pollfd_t fd; ++ struct cgi_bucket_data *data = apr_palloc(r->pool, sizeof *data); ++ ++ /* Disable APR timeout handling since we'll use poll() entirely. 
*/ ++ apr_file_pipe_timeout_set(out, 0); ++ apr_file_pipe_timeout_set(err, 0); ++ ++ APR_BUCKET_INIT(b); ++ b->free = apr_bucket_free; ++ b->list = list; ++ b->type = &bucket_type_cgi; ++ b->length = (apr_size_t)(-1); ++ b->start = -1; ++ ++ /* Create the pollset */ ++ rv = apr_pollset_create(&data->pollset, 2, r->pool, 0); ++ if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01217) ++ "apr_pollset_create(); check system or user limits"); ++ return NULL; ++ } ++ ++ fd.desc_type = APR_POLL_FILE; ++ fd.reqevents = APR_POLLIN; ++ fd.p = r->pool; ++ fd.desc.f = out; /* script's stdout */ ++ fd.client_data = (void *)1; ++ rv = apr_pollset_add(data->pollset, &fd); ++ if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01218) ++ "apr_pollset_add(); check system or user limits"); ++ return NULL; ++ } ++ ++ fd.desc.f = err; /* script's stderr */ ++ fd.client_data = (void *)2; ++ rv = apr_pollset_add(data->pollset, &fd); ++ if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01219) ++ "apr_pollset_add(); check system or user limits"); ++ return NULL; ++ } ++ ++ data->r = r; ++ data->timeout = timeout; ++ b->data = data; ++ return b; ++} ++ ++/* Create a duplicate CGI bucket using given bucket data */ ++static apr_bucket *cgi_bucket_dup(struct cgi_bucket_data *data, ++ apr_bucket_alloc_t *list) ++{ ++ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); ++ APR_BUCKET_INIT(b); ++ b->free = apr_bucket_free; ++ b->list = list; ++ b->type = &bucket_type_cgi; ++ b->length = (apr_size_t)(-1); ++ b->start = -1; ++ b->data = data; ++ return b; ++} ++ ++/* Handle stdout from CGI child. Duplicate of logic from the _read ++ * method of the real APR pipe bucket implementation. */ ++static apr_status_t cgi_read_stdout(apr_bucket *a, apr_file_t *out, ++ const char **str, apr_size_t *len) ++{ ++ char *buf; ++ apr_status_t rv; ++ ++ *str = NULL; ++ *len = APR_BUCKET_BUFF_SIZE; ++ buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */ ++ ++ rv = apr_file_read(out, buf, len); ++ ++ if (rv != APR_SUCCESS && rv != APR_EOF) { ++ apr_bucket_free(buf); ++ return rv; ++ } ++ ++ if (*len > 0) { ++ struct cgi_bucket_data *data = a->data; ++ apr_bucket_heap *h; ++ ++ /* Change the current bucket to refer to what we read */ ++ a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free); ++ h = a->data; ++ h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */ ++ *str = buf; ++ APR_BUCKET_INSERT_AFTER(a, cgi_bucket_dup(data, a->list)); ++ } ++ else { ++ apr_bucket_free(buf); ++ a = apr_bucket_immortal_make(a, "", 0); ++ *str = a->data; ++ } ++ return rv; ++} ++ ++/* Read method of CGI bucket: polls on stderr and stdout of the child, ++ * sending any stderr output immediately away to the error log. */ ++static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str, ++ apr_size_t *len, apr_read_type_e block) ++{ ++ struct cgi_bucket_data *data = b->data; ++ apr_interval_time_t timeout = 0; ++ apr_status_t rv; ++ int gotdata = 0; ++ ++ if (block != APR_NONBLOCK_READ) { ++ timeout = data->timeout > 0 ? 
data->timeout : data->r->server->timeout; ++ } ++ ++ do { ++ const apr_pollfd_t *results; ++ apr_int32_t num; ++ ++ rv = apr_pollset_poll(data->pollset, timeout, &num, &results); ++ if (APR_STATUS_IS_TIMEUP(rv)) { ++ if (timeout) { ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, data->r, APLOGNO(01220) ++ "Timeout waiting for output from CGI script %s", ++ data->r->filename); ++ return rv; ++ } ++ else { ++ return APR_EAGAIN; ++ } ++ } ++ else if (APR_STATUS_IS_EINTR(rv)) { ++ continue; ++ } ++ else if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, data->r, APLOGNO(01221) ++ "poll failed waiting for CGI child"); ++ return rv; ++ } ++ ++ for (; num; num--, results++) { ++ if (results[0].client_data == (void *)1) { ++ /* stdout */ ++ rv = cgi_read_stdout(b, results[0].desc.f, str, len); ++ if (APR_STATUS_IS_EOF(rv)) { ++ rv = APR_SUCCESS; ++ } ++ gotdata = 1; ++ } else { ++ /* stderr */ ++ apr_status_t rv2 = log_script_err(data->r, results[0].desc.f); ++ if (APR_STATUS_IS_EOF(rv2)) { ++ apr_pollset_remove(data->pollset, &results[0]); ++ } ++ } ++ } ++ ++ } while (!gotdata); ++ ++ return rv; ++} ++ ++static const apr_bucket_type_t bucket_type_cgi = { ++ "CGI", 5, APR_BUCKET_DATA, ++ apr_bucket_destroy_noop, ++ cgi_bucket_read, ++ apr_bucket_setaside_notimpl, ++ apr_bucket_split_notimpl, ++ apr_bucket_copy_notimpl ++}; ++ ++#endif /* WANT_CGI_BUCKET */ ++ ++/* Handle the CGI response output, having set up the brigade with the ++ * CGI or PIPE bucket as appropriate. */ ++static int cgi_handle_response(request_rec *r, int nph, apr_bucket_brigade *bb, ++ apr_interval_time_t timeout, cgi_server_conf *conf, ++ char *logdata, apr_file_t *script_err) ++{ ++ apr_status_t rv; ++ ++ /* Handle script return... */ ++ if (!nph) { ++ const char *location; ++ char sbuf[MAX_STRING_LEN]; ++ int ret; ++ ++ if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf, ++ APLOG_MODULE_INDEX))) ++ { ++ ret = log_script(r, conf, ret, logdata, sbuf, bb, script_err); ++ ++ /* ++ * ret could be HTTP_NOT_MODIFIED in the case that the CGI script ++ * does not set an explicit status and ap_meets_conditions, which ++ * is called by ap_scan_script_header_err_brigade, detects that ++ * the conditions of the requests are met and the response is ++ * not modified. ++ * In this case set r->status and return OK in order to prevent ++ * running through the error processing stack as this would ++ * break with mod_cache, if the conditions had been set by ++ * mod_cache itself to validate a stale entity. ++ * BTW: We circumvent the error processing stack anyway if the ++ * CGI script set an explicit status code (whatever it is) and ++ * the only possible values for ret here are: ++ * ++ * HTTP_NOT_MODIFIED (set by ap_meets_conditions) ++ * HTTP_PRECONDITION_FAILED (set by ap_meets_conditions) ++ * HTTP_INTERNAL_SERVER_ERROR (if something went wrong during the ++ * processing of the response of the CGI script, e.g broken headers ++ * or a crashed CGI process). ++ */ ++ if (ret == HTTP_NOT_MODIFIED) { ++ r->status = ret; ++ return OK; ++ } ++ ++ return ret; ++ } ++ ++ location = apr_table_get(r->headers_out, "Location"); ++ ++ if (location && r->status == 200) { ++ /* For a redirect whether internal or not, discard any ++ * remaining stdout from the script, and log any remaining ++ * stderr output, as normal. 
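cgi_bucket_read() above multiplexes the child's two pipes: whenever stderr is readable it is drained straight to the error log, while stdout supplies the bucket's data; a poll timeout either aborts the request (blocking read) or surfaces as APR_EAGAIN (non-blocking). A self-contained sketch of the same pattern using plain poll(2); the APR pollset calls in the patch wrap the same mechanism:

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Relay child stdout, logging stderr lines as they arrive. */
    static int pump(int out_fd, int err_fd)
    {
        struct pollfd fds[2] = {
            { out_fd, POLLIN, 0 },
            { err_fd, POLLIN, 0 },
        };
        char buf[4096];
        for (;;) {
            ssize_t n;
            if (poll(fds, 2, 5000) <= 0)
                return -1;                     /* timeout or error */
            if (fds[1].revents & (POLLIN | POLLHUP)) {
                n = read(err_fd, buf, sizeof buf);
                if (n > 0)
                    fprintf(stderr, "[script stderr] %.*s", (int)n, buf);
            }
            if (fds[0].revents & (POLLIN | POLLHUP)) {
                n = read(out_fd, buf, sizeof buf);
                if (n <= 0)
                    return 0;                  /* EOF on stdout: done */
                fwrite(buf, 1, (size_t)n, stdout);
            }
        }
    }

    int main(void)
    {
        int out[2], err[2];
        if (pipe(out) || pipe(err))
            return 1;
        if (fork() == 0) {                     /* toy "CGI child" */
            dprintf(out[1], "hello from stdout\n");
            dprintf(err[1], "warning on stderr\n");
            _exit(0);
        }
        close(out[1]);
        close(err[1]);
        return pump(out[0], err[0]) ? 1 : 0;
    }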
*/ ++ discard_script_output(bb); ++ apr_brigade_destroy(bb); ++ ++ if (script_err) { ++ apr_file_pipe_timeout_set(script_err, timeout); ++ log_script_err(r, script_err); ++ } ++ } ++ ++ if (location && location[0] == '/' && r->status == 200) { ++ /* This redirect needs to be a GET no matter what the original ++ * method was. ++ */ ++ r->method = "GET"; ++ r->method_number = M_GET; ++ ++ /* We already read the message body (if any), so don't allow ++ * the redirected request to think it has one. We can ignore ++ * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR. ++ */ ++ apr_table_unset(r->headers_in, "Content-Length"); ++ ++ ap_internal_redirect_handler(location, r); ++ return OK; ++ } ++ else if (location && r->status == 200) { ++ /* XXX: Note that if a script wants to produce its own Redirect ++ * body, it now has to explicitly *say* "Status: 302" ++ */ ++ discard_script_output(bb); ++ apr_brigade_destroy(bb); ++ return HTTP_MOVED_TEMPORARILY; ++ } ++ ++ rv = ap_pass_brigade(r->output_filters, bb); ++ } ++ else /* nph */ { ++ struct ap_filter_t *cur; ++ ++ /* get rid of all filters up through protocol... since we ++ * haven't parsed off the headers, there is no way they can ++ * work ++ */ ++ ++ cur = r->proto_output_filters; ++ while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) { ++ cur = cur->next; ++ } ++ r->output_filters = r->proto_output_filters = cur; ++ ++ rv = ap_pass_brigade(r->output_filters, bb); ++ } ++ ++ /* don't soak up script output if errors occurred writing it ++ * out... otherwise, we prolong the life of the script when the ++ * connection drops or we stopped sending output for some other ++ * reason */ ++ if (script_err && rv == APR_SUCCESS && !r->connection->aborted) { ++ apr_file_pipe_timeout_set(script_err, timeout); ++ log_script_err(r, script_err); ++ } ++ ++ if (script_err) apr_file_close(script_err); ++ ++ return OK; /* NOT r->status, even if it has changed. */ ++} +diff --git a/modules/generators/config5.m4 b/modules/generators/config5.m4 +index bf29521..0863553 100644 +--- a/modules/generators/config5.m4 ++++ b/modules/generators/config5.m4 +@@ -78,4 +78,15 @@ fi + + APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current]) + ++AC_ARG_ENABLE(cgid-fdpassing, ++ [APACHE_HELP_STRING(--enable-cgid-fdpassing,Enable experimental mod_cgid support for fd passing)], ++ [if test "$enableval" = "yes"; then ++ AC_CHECK_DECL(CMSG_DATA, ++ [AC_DEFINE([HAVE_CGID_FDPASSING], 1, [Enable FD passing support in mod_cgid])], ++ [AC_MSG_ERROR([cannot support mod_cgid fd-passing on this system])], [ ++#include ++#include ]) ++ fi ++]) ++ + APACHE_MODPATH_FINISH +diff --git a/modules/generators/mod_cgi.c b/modules/generators/mod_cgi.c +index 7e4b126..f438b35 100644 +--- a/modules/generators/mod_cgi.c ++++ b/modules/generators/mod_cgi.c +@@ -92,6 +92,10 @@ typedef struct { + apr_size_t bufbytes; + } cgi_server_conf; + ++typedef struct { ++ apr_interval_time_t timeout; ++} cgi_dirconf; ++ + static void *create_cgi_config(apr_pool_t *p, server_rec *s) + { + cgi_server_conf *c = +@@ -112,6 +116,12 @@ static void *merge_cgi_config(apr_pool_t *p, void *basev, void *overridesv) + return overrides->logname ? 
overrides : base; + } + ++static void *create_cgi_dirconf(apr_pool_t *p, char *dummy) ++{ ++ cgi_dirconf *c = (cgi_dirconf *) apr_pcalloc(p, sizeof(cgi_dirconf)); ++ return c; ++} ++ + static const char *set_scriptlog(cmd_parms *cmd, void *dummy, const char *arg) + { + server_rec *s = cmd->server; +@@ -150,6 +160,17 @@ static const char *set_scriptlog_buffer(cmd_parms *cmd, void *dummy, + return NULL; + } + ++static const char *set_script_timeout(cmd_parms *cmd, void *dummy, const char *arg) ++{ ++ cgi_dirconf *dc = dummy; ++ ++ if (ap_timeout_parameter_parse(arg, &dc->timeout, "s") != APR_SUCCESS) { ++ return "CGIScriptTimeout has wrong format"; ++ } ++ ++ return NULL; ++} ++ + static const command_rec cgi_cmds[] = + { + AP_INIT_TAKE1("ScriptLog", set_scriptlog, NULL, RSRC_CONF, +@@ -158,6 +179,9 @@ AP_INIT_TAKE1("ScriptLogLength", set_scriptlog_length, NULL, RSRC_CONF, + "the maximum length (in bytes) of the script debug log"), + AP_INIT_TAKE1("ScriptLogBuffer", set_scriptlog_buffer, NULL, RSRC_CONF, + "the maximum size (in bytes) to record of a POST request"), ++AP_INIT_TAKE1("CGIScriptTimeout", set_script_timeout, NULL, RSRC_CONF | ACCESS_CONF, ++ "The amount of time to wait between successful reads from " ++ "the CGI script, in seconds."), + {NULL} + }; + +@@ -466,23 +490,26 @@ static apr_status_t run_cgi_child(apr_file_t **script_out, + apr_filepath_name_get(r->filename)); + } + else { ++ cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module); ++ apr_interval_time_t timeout = dc->timeout > 0 ? dc->timeout : r->server->timeout; ++ + apr_pool_note_subprocess(p, procnew, APR_KILL_AFTER_TIMEOUT); + + *script_in = procnew->out; + if (!*script_in) + return APR_EBADF; +- apr_file_pipe_timeout_set(*script_in, r->server->timeout); ++ apr_file_pipe_timeout_set(*script_in, timeout); + + if (e_info->prog_type == RUN_AS_CGI) { + *script_out = procnew->in; + if (!*script_out) + return APR_EBADF; +- apr_file_pipe_timeout_set(*script_out, r->server->timeout); ++ apr_file_pipe_timeout_set(*script_out, timeout); + + *script_err = procnew->err; + if (!*script_err) + return APR_EBADF; +- apr_file_pipe_timeout_set(*script_err, r->server->timeout); ++ apr_file_pipe_timeout_set(*script_err, timeout); + } + } + } +@@ -536,209 +563,12 @@ static apr_status_t default_build_command(const char **cmd, const char ***argv, + return APR_SUCCESS; + } + +-static void discard_script_output(apr_bucket_brigade *bb) +-{ +- apr_bucket *e; +- const char *buf; +- apr_size_t len; +- +- for (e = APR_BRIGADE_FIRST(bb); +- e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e); +- e = APR_BRIGADE_FIRST(bb)) +- { +- if (apr_bucket_read(e, &buf, &len, APR_BLOCK_READ)) { +- break; +- } +- apr_bucket_delete(e); +- } +-} +- + #if APR_FILES_AS_SOCKETS +- +-/* A CGI bucket type is needed to catch any output to stderr from the +- * script; see PR 22030. */ +-static const apr_bucket_type_t bucket_type_cgi; +- +-struct cgi_bucket_data { +- apr_pollset_t *pollset; +- request_rec *r; +-}; +- +-/* Create a CGI bucket using pipes from script stdout 'out' +- * and stderr 'err', for request 'r'. 
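CGIScriptTimeout, added earlier in this hunk, is parsed with ap_timeout_parameter_parse() using seconds as the default unit, and an unset (zero) value falls back to r->server->timeout. A sketch of a duration parser in that spirit, returning microseconds as apr_interval_time_t does; parse_timeout() is a made-up helper, not the httpd function, and the accepted suffixes here are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Parse "10", "10s" or "250ms" into microseconds; -1 on error. */
    static int64_t parse_timeout(const char *arg, const char *def_unit)
    {
        char *end;
        long v = strtol(arg, &end, 10);
        const char *unit = *end ? end : def_unit;
        if (v < 0 || end == arg)
            return -1;
        if (!strcmp(unit, "s"))
            return (int64_t)v * 1000000;
        if (!strcmp(unit, "ms"))
            return (int64_t)v * 1000;
        return -1;
    }

    int main(void)
    {
        printf("%lld\n", (long long)parse_timeout("10", "s"));    /* 10000000 */
        printf("%lld\n", (long long)parse_timeout("250ms", "s")); /* 250000 */
        return 0;
    }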
*/ +-static apr_bucket *cgi_bucket_create(request_rec *r, +- apr_file_t *out, apr_file_t *err, +- apr_bucket_alloc_t *list) +-{ +- apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); +- apr_status_t rv; +- apr_pollfd_t fd; +- struct cgi_bucket_data *data = apr_palloc(r->pool, sizeof *data); +- +- APR_BUCKET_INIT(b); +- b->free = apr_bucket_free; +- b->list = list; +- b->type = &bucket_type_cgi; +- b->length = (apr_size_t)(-1); +- b->start = -1; +- +- /* Create the pollset */ +- rv = apr_pollset_create(&data->pollset, 2, r->pool, 0); +- if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01217) +- "apr_pollset_create(); check system or user limits"); +- return NULL; +- } +- +- fd.desc_type = APR_POLL_FILE; +- fd.reqevents = APR_POLLIN; +- fd.p = r->pool; +- fd.desc.f = out; /* script's stdout */ +- fd.client_data = (void *)1; +- rv = apr_pollset_add(data->pollset, &fd); +- if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01218) +- "apr_pollset_add(); check system or user limits"); +- return NULL; +- } +- +- fd.desc.f = err; /* script's stderr */ +- fd.client_data = (void *)2; +- rv = apr_pollset_add(data->pollset, &fd); +- if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01219) +- "apr_pollset_add(); check system or user limits"); +- return NULL; +- } +- +- data->r = r; +- b->data = data; +- return b; +-} +- +-/* Create a duplicate CGI bucket using given bucket data */ +-static apr_bucket *cgi_bucket_dup(struct cgi_bucket_data *data, +- apr_bucket_alloc_t *list) +-{ +- apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); +- APR_BUCKET_INIT(b); +- b->free = apr_bucket_free; +- b->list = list; +- b->type = &bucket_type_cgi; +- b->length = (apr_size_t)(-1); +- b->start = -1; +- b->data = data; +- return b; +-} +- +-/* Handle stdout from CGI child. Duplicate of logic from the _read +- * method of the real APR pipe bucket implementation. */ +-static apr_status_t cgi_read_stdout(apr_bucket *a, apr_file_t *out, +- const char **str, apr_size_t *len) +-{ +- char *buf; +- apr_status_t rv; +- +- *str = NULL; +- *len = APR_BUCKET_BUFF_SIZE; +- buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */ +- +- rv = apr_file_read(out, buf, len); +- +- if (rv != APR_SUCCESS && rv != APR_EOF) { +- apr_bucket_free(buf); +- return rv; +- } +- +- if (*len > 0) { +- struct cgi_bucket_data *data = a->data; +- apr_bucket_heap *h; +- +- /* Change the current bucket to refer to what we read */ +- a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free); +- h = a->data; +- h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */ +- *str = buf; +- APR_BUCKET_INSERT_AFTER(a, cgi_bucket_dup(data, a->list)); +- } +- else { +- apr_bucket_free(buf); +- a = apr_bucket_immortal_make(a, "", 0); +- *str = a->data; +- } +- return rv; +-} +- +-/* Read method of CGI bucket: polls on stderr and stdout of the child, +- * sending any stderr output immediately away to the error log. */ +-static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str, +- apr_size_t *len, apr_read_type_e block) +-{ +- struct cgi_bucket_data *data = b->data; +- apr_interval_time_t timeout; +- apr_status_t rv; +- int gotdata = 0; +- +- timeout = block == APR_NONBLOCK_READ ? 
0 : data->r->server->timeout; +- +- do { +- const apr_pollfd_t *results; +- apr_int32_t num; +- +- rv = apr_pollset_poll(data->pollset, timeout, &num, &results); +- if (APR_STATUS_IS_TIMEUP(rv)) { +- if (timeout) { +- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, data->r, APLOGNO(01220) +- "Timeout waiting for output from CGI script %s", +- data->r->filename); +- return rv; +- } +- else { +- return APR_EAGAIN; +- } +- } +- else if (APR_STATUS_IS_EINTR(rv)) { +- continue; +- } +- else if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, data->r, APLOGNO(01221) +- "poll failed waiting for CGI child"); +- return rv; +- } +- +- for (; num; num--, results++) { +- if (results[0].client_data == (void *)1) { +- /* stdout */ +- rv = cgi_read_stdout(b, results[0].desc.f, str, len); +- if (APR_STATUS_IS_EOF(rv)) { +- rv = APR_SUCCESS; +- } +- gotdata = 1; +- } else { +- /* stderr */ +- apr_status_t rv2 = log_script_err(data->r, results[0].desc.f); +- if (APR_STATUS_IS_EOF(rv2)) { +- apr_pollset_remove(data->pollset, &results[0]); +- } +- } +- } +- +- } while (!gotdata); +- +- return rv; +-} +- +-static const apr_bucket_type_t bucket_type_cgi = { +- "CGI", 5, APR_BUCKET_DATA, +- apr_bucket_destroy_noop, +- cgi_bucket_read, +- apr_bucket_setaside_notimpl, +- apr_bucket_split_notimpl, +- apr_bucket_copy_notimpl +-}; +- ++#define WANT_CGI_BUCKET + #endif + ++#include "cgi_common.h" ++ + static int cgi_handler(request_rec *r) + { + int nph; +@@ -757,6 +587,8 @@ static int cgi_handler(request_rec *r) + apr_status_t rv; + cgi_exec_info_t e_info; + conn_rec *c; ++ cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module); ++ apr_interval_time_t timeout = dc->timeout > 0 ? dc->timeout : r->server->timeout; + + if (strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script")) { + return DECLINED; +@@ -916,10 +748,7 @@ static int cgi_handler(request_rec *r) + AP_DEBUG_ASSERT(script_in != NULL); + + #if APR_FILES_AS_SOCKETS +- apr_file_pipe_timeout_set(script_in, 0); +- apr_file_pipe_timeout_set(script_err, 0); +- +- b = cgi_bucket_create(r, script_in, script_err, c->bucket_alloc); ++ b = cgi_bucket_create(r, dc->timeout, script_in, script_err, c->bucket_alloc); + if (b == NULL) + return HTTP_INTERNAL_SERVER_ERROR; + #else +@@ -929,111 +758,7 @@ static int cgi_handler(request_rec *r) + b = apr_bucket_eos_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + +- /* Handle script return... */ +- if (!nph) { +- const char *location; +- char sbuf[MAX_STRING_LEN]; +- int ret; +- +- if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf, +- APLOG_MODULE_INDEX))) +- { +- ret = log_script(r, conf, ret, dbuf, sbuf, bb, script_err); +- +- /* +- * ret could be HTTP_NOT_MODIFIED in the case that the CGI script +- * does not set an explicit status and ap_meets_conditions, which +- * is called by ap_scan_script_header_err_brigade, detects that +- * the conditions of the requests are met and the response is +- * not modified. +- * In this case set r->status and return OK in order to prevent +- * running through the error processing stack as this would +- * break with mod_cache, if the conditions had been set by +- * mod_cache itself to validate a stale entity. 
+- * BTW: We circumvent the error processing stack anyway if the +- * CGI script set an explicit status code (whatever it is) and +- * the only possible values for ret here are: +- * +- * HTTP_NOT_MODIFIED (set by ap_meets_conditions) +- * HTTP_PRECONDITION_FAILED (set by ap_meets_conditions) +- * HTTP_INTERNAL_SERVER_ERROR (if something went wrong during the +- * processing of the response of the CGI script, e.g broken headers +- * or a crashed CGI process). +- */ +- if (ret == HTTP_NOT_MODIFIED) { +- r->status = ret; +- return OK; +- } +- +- return ret; +- } +- +- location = apr_table_get(r->headers_out, "Location"); +- +- if (location && r->status == 200) { +- /* For a redirect whether internal or not, discard any +- * remaining stdout from the script, and log any remaining +- * stderr output, as normal. */ +- discard_script_output(bb); +- apr_brigade_destroy(bb); +- apr_file_pipe_timeout_set(script_err, r->server->timeout); +- log_script_err(r, script_err); +- } +- +- if (location && location[0] == '/' && r->status == 200) { +- /* This redirect needs to be a GET no matter what the original +- * method was. +- */ +- r->method = "GET"; +- r->method_number = M_GET; +- +- /* We already read the message body (if any), so don't allow +- * the redirected request to think it has one. We can ignore +- * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR. +- */ +- apr_table_unset(r->headers_in, "Content-Length"); +- +- ap_internal_redirect_handler(location, r); +- return OK; +- } +- else if (location && r->status == 200) { +- /* XXX: Note that if a script wants to produce its own Redirect +- * body, it now has to explicitly *say* "Status: 302" +- */ +- return HTTP_MOVED_TEMPORARILY; +- } +- +- rv = ap_pass_brigade(r->output_filters, bb); +- } +- else /* nph */ { +- struct ap_filter_t *cur; +- +- /* get rid of all filters up through protocol... since we +- * haven't parsed off the headers, there is no way they can +- * work +- */ +- +- cur = r->proto_output_filters; +- while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) { +- cur = cur->next; +- } +- r->output_filters = r->proto_output_filters = cur; +- +- rv = ap_pass_brigade(r->output_filters, bb); +- } +- +- /* don't soak up script output if errors occurred writing it +- * out... otherwise, we prolong the life of the script when the +- * connection drops or we stopped sending output for some other +- * reason */ +- if (rv == APR_SUCCESS && !r->connection->aborted) { +- apr_file_pipe_timeout_set(script_err, r->server->timeout); +- log_script_err(r, script_err); +- } +- +- apr_file_close(script_err); +- +- return OK; /* NOT r->status, even if it has changed. 
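Everything removed in this hunk reappears, shared, in cgi_common.h: the call to cgi_handle_response() just below replaces the inline response handling, and mod_cgi opts into the CGI bucket by defining WANT_CGI_BUCKET before the include (guarded by APR_FILES_AS_SOCKETS). The structure in miniature, with invented names; both "files" are shown inline:

    /* --- common_sketch.h: shared between two modules -------------- */
    static int shared_handler(void) { return 42; }

    #ifdef WANT_EXTRA                 /* stands in for WANT_CGI_BUCKET */
    static int extra_feature(void) { return shared_handler() + 1; }
    #endif
    /* --------------------------------------------------------------- */

    /* mod_a.c, which wants the optional part (mod_cgi's role):
     *     #define WANT_EXTRA
     *     #include "common_sketch.h"
     *
     * mod_b.c, which does not (mod_cgid without fd passing):
     *     #include "common_sketch.h"
     */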
*/ ++ return cgi_handle_response(r, nph, bb, timeout, conf, dbuf, script_err); + } + + /*============================================================================ +@@ -1268,7 +993,7 @@ static void register_hooks(apr_pool_t *p) + AP_DECLARE_MODULE(cgi) = + { + STANDARD20_MODULE_STUFF, +- NULL, /* dir config creater */ ++ create_cgi_dirconf, /* dir config creater */ + NULL, /* dir merger --- default is to override */ + create_cgi_config, /* server config */ + merge_cgi_config, /* merge server config */ +diff --git a/modules/generators/mod_cgid.c b/modules/generators/mod_cgid.c +index 9f4282c..102d2b3 100644 +--- a/modules/generators/mod_cgid.c ++++ b/modules/generators/mod_cgid.c +@@ -342,15 +342,19 @@ static apr_status_t close_unix_socket(void *thefd) + return close(fd); + } + +-/* deal with incomplete reads and signals +- * assume you really have to read buf_size bytes +- */ +-static apr_status_t sock_read(int fd, void *vbuf, size_t buf_size) ++/* Read from the socket dealing with incomplete messages and signals. ++ * Returns 0 on success or errno on failure. Stderr fd passed as ++ * auxiliary data from other end is written to *errfd, or else stderr ++ * fileno if not present. */ ++static apr_status_t sock_readhdr(int fd, int *errfd, void *vbuf, size_t buf_size) + { +- char *buf = vbuf; + int rc; ++#ifndef HAVE_CGID_FDPASSING ++ char *buf = vbuf; + size_t bytes_read = 0; + ++ if (errfd) *errfd = 0; ++ + do { + do { + rc = read(fd, buf + bytes_read, buf_size - bytes_read); +@@ -365,9 +369,60 @@ static apr_status_t sock_read(int fd, void *vbuf, size_t buf_size) + } + } while (bytes_read < buf_size); + ++ ++#else /* with FD passing */ ++ struct msghdr msg = {0}; ++ struct iovec vec = {vbuf, buf_size}; ++ struct cmsghdr *cmsg; ++ union { /* union to ensure alignment */ ++ struct cmsghdr cm; ++ char buf[CMSG_SPACE(sizeof(int))]; ++ } u; ++ ++ msg.msg_iov = &vec; ++ msg.msg_iovlen = 1; ++ ++ if (errfd) { ++ msg.msg_control = u.buf; ++ msg.msg_controllen = sizeof(u.buf); ++ *errfd = 0; ++ } ++ ++ /* use MSG_WAITALL to skip loop on truncated reads */ ++ do { ++ rc = recvmsg(fd, &msg, MSG_WAITALL); ++ } while (rc < 0 && errno == EINTR); ++ ++ if (rc == 0) { ++ return ECONNRESET; ++ } ++ else if (rc < 0) { ++ return errno; ++ } ++ else if (rc != buf_size) { ++ /* MSG_WAITALL should ensure the recvmsg blocks until the ++ * entire length is read, but let's be paranoid. */ ++ return APR_INCOMPLETE; ++ } ++ ++ if (errfd ++ && (cmsg = CMSG_FIRSTHDR(&msg)) != NULL ++ && cmsg->cmsg_len == CMSG_LEN(sizeof(*errfd)) ++ && cmsg->cmsg_level == SOL_SOCKET ++ && cmsg->cmsg_type == SCM_RIGHTS) { ++ *errfd = *((int *) CMSG_DATA(cmsg)); ++ } ++#endif ++ + return APR_SUCCESS; + } + ++/* As sock_readhdr but without auxiliary fd passing. */ ++static apr_status_t sock_read(int fd, void *vbuf, size_t buf_size) ++{ ++ return sock_readhdr(fd, NULL, vbuf, buf_size); ++} ++ + /* deal with signals + */ + static apr_status_t sock_write(int fd, const void *buf, size_t buf_size) +@@ -384,7 +439,7 @@ static apr_status_t sock_write(int fd, const void *buf, size_t buf_size) + return APR_SUCCESS; + } + +-static apr_status_t sock_writev(int fd, request_rec *r, int count, ...) ++static apr_status_t sock_writev(int fd, int auxfd, request_rec *r, int count, ...) + { + va_list ap; + int rc; +@@ -399,9 +454,39 @@ static apr_status_t sock_writev(int fd, request_rec *r, int count, ...) 
+ } + va_end(ap); + ++#ifndef HAVE_CGID_FDPASSING + do { + rc = writev(fd, vec, count); + } while (rc < 0 && errno == EINTR); ++#else ++ { ++ struct msghdr msg = { 0 }; ++ struct cmsghdr *cmsg; ++ union { /* union for alignment */ ++ char buf[CMSG_SPACE(sizeof(int))]; ++ struct cmsghdr align; ++ } u; ++ ++ msg.msg_iov = vec; ++ msg.msg_iovlen = count; ++ ++ if (auxfd) { ++ msg.msg_control = u.buf; ++ msg.msg_controllen = sizeof(u.buf); ++ ++ cmsg = CMSG_FIRSTHDR(&msg); ++ cmsg->cmsg_level = SOL_SOCKET; ++ cmsg->cmsg_type = SCM_RIGHTS; ++ cmsg->cmsg_len = CMSG_LEN(sizeof(int)); ++ *((int *) CMSG_DATA(cmsg)) = auxfd; ++ } ++ ++ do { ++ rc = sendmsg(fd, &msg, 0); ++ } while (rc < 0 && errno == EINTR); ++ } ++#endif ++ + if (rc < 0) { + return errno; + } +@@ -410,7 +495,7 @@ static apr_status_t sock_writev(int fd, request_rec *r, int count, ...) + } + + static apr_status_t get_req(int fd, request_rec *r, char **argv0, char ***env, +- cgid_req_t *req) ++ int *errfd, cgid_req_t *req) + { + int i; + char **environ; +@@ -421,7 +506,7 @@ static apr_status_t get_req(int fd, request_rec *r, char **argv0, char ***env, + r->server = apr_pcalloc(r->pool, sizeof(server_rec)); + + /* read the request header */ +- stat = sock_read(fd, req, sizeof(*req)); ++ stat = sock_readhdr(fd, errfd, req, sizeof(*req)); + if (stat != APR_SUCCESS) { + return stat; + } +@@ -479,14 +564,15 @@ static apr_status_t get_req(int fd, request_rec *r, char **argv0, char ***env, + return APR_SUCCESS; + } + +-static apr_status_t send_req(int fd, request_rec *r, char *argv0, char **env, +- int req_type) ++static apr_status_t send_req(int fd, apr_file_t *errpipe, request_rec *r, ++ char *argv0, char **env, int req_type) + { + int i; + cgid_req_t req = {0}; + apr_status_t stat; + ap_unix_identity_t * ugid = ap_run_get_suexec_identity(r); + core_dir_config *core_conf = ap_get_core_module_config(r->per_dir_config); ++ int errfd; + + + if (ugid == NULL) { +@@ -507,16 +593,21 @@ static apr_status_t send_req(int fd, request_rec *r, char *argv0, char **env, + req.args_len = r->args ? strlen(r->args) : 0; + req.loglevel = r->server->log.level; + ++ if (errpipe) ++ apr_os_file_get(&errfd, errpipe); ++ else ++ errfd = 0; ++ + /* Write the request header */ + if (req.args_len) { +- stat = sock_writev(fd, r, 5, ++ stat = sock_writev(fd, errfd, r, 5, + &req, sizeof(req), + r->filename, req.filename_len, + argv0, req.argv0_len, + r->uri, req.uri_len, + r->args, req.args_len); + } else { +- stat = sock_writev(fd, r, 4, ++ stat = sock_writev(fd, errfd, r, 4, + &req, sizeof(req), + r->filename, req.filename_len, + argv0, req.argv0_len, +@@ -531,7 +622,7 @@ static apr_status_t send_req(int fd, request_rec *r, char *argv0, char **env, + for (i = 0; i < req.env_count; i++) { + apr_size_t curlen = strlen(env[i]); + +- if ((stat = sock_writev(fd, r, 2, &curlen, sizeof(curlen), ++ if ((stat = sock_writev(fd, 0, r, 2, &curlen, sizeof(curlen), + env[i], curlen)) != APR_SUCCESS) { + return stat; + } +@@ -582,20 +673,34 @@ static void daemon_signal_handler(int sig) + } + } + ++/* Callback executed in the forked child process if exec of the CGI ++ * script fails. For the fd-passing case, output to stderr goes to ++ * the client (request handling thread) and is logged via ++ * ap_log_rerror there. For the non-fd-passing case, the "fake" ++ * request_rec passed via userdata is used to log. 
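The two hunks above are the heart of the fd-passing scheme: sock_readhdr() drains ancillary data on the daemon side, and sock_writev() attaches the stderr descriptor on the server side. For context, the SCM_RIGHTS mechanics reduce to the following minimal standalone sketch; send_fd()/recv_fd() are hypothetical helper names, the payload is a single dummy byte, and error handling is collapsed to return codes:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Ship one descriptor across a connected AF_UNIX socket; the kernel
     * duplicates it into the peer process. */
    static int send_fd(int sock, int fd)
    {
        char byte = 0;
        struct iovec vec = { &byte, 1 };
        struct msghdr msg = { 0 };
        struct cmsghdr *cmsg;
        union {                     /* union to guarantee cmsg alignment */
            struct cmsghdr cm;
            char buf[CMSG_SPACE(sizeof(int))];
        } u;

        msg.msg_iov = &vec;
        msg.msg_iovlen = 1;
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
    }

    /* Receive the byte plus the descriptor; returns the new local fd,
     * or -1 if the peer attached no ancillary data. */
    static int recv_fd(int sock)
    {
        char byte;
        struct iovec vec = { &byte, 1 };
        struct msghdr msg = { 0 };
        struct cmsghdr *cmsg;
        union {
            struct cmsghdr cm;
            char buf[CMSG_SPACE(sizeof(int))];
        } u;
        int fd = -1;

        msg.msg_iov = &vec;
        msg.msg_iovlen = 1;
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);

        if (recvmsg(sock, &msg, MSG_WAITALL) != 1)
            return -1;

        cmsg = CMSG_FIRSTHDR(&msg);
        if (cmsg != NULL
            && cmsg->cmsg_len == CMSG_LEN(sizeof(int))
            && cmsg->cmsg_level == SOL_SOCKET
            && cmsg->cmsg_type == SCM_RIGHTS) {
            memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
        }
        return fd;
    }

The patch itself piggybacks the descriptor on the existing cgid request header rather than sending a dummy byte, so no extra round trip is added.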
*/ + static void cgid_child_errfn(apr_pool_t *pool, apr_status_t err, + const char *description) + { +- request_rec *r; + void *vr; + + apr_pool_userdata_get(&vr, ERRFN_USERDATA_KEY, pool); +- r = vr; +- +- /* sure we got r, but don't call ap_log_rerror() because we don't +- * have r->headers_in and possibly other storage referenced by +- * ap_log_rerror() +- */ +- ap_log_error(APLOG_MARK, APLOG_ERR, err, r->server, APLOGNO(01241) "%s", description); ++ if (vr) { ++ request_rec *r = vr; ++ ++ /* sure we got r, but don't call ap_log_rerror() because we don't ++ * have r->headers_in and possibly other storage referenced by ++ * ap_log_rerror() ++ */ ++ ap_log_error(APLOG_MARK, APLOG_ERR, err, r->server, APLOGNO(01241) "%s", description); ++ } ++ else { ++ const char *logstr; ++ ++ logstr = apr_psprintf(pool, APLOGNO(01241) "error spawning CGI child: %s (%pm)\n", ++ description, &err); ++ fputs(logstr, stderr); ++ fflush(stderr); ++ } + } + + static int cgid_server(void *data) +@@ -669,7 +774,7 @@ static int cgid_server(void *data) + } + + while (!daemon_should_exit) { +- int errfileno = STDERR_FILENO; ++ int errfileno; + char *argv0 = NULL; + char **env = NULL; + const char * const *argv; +@@ -709,7 +814,7 @@ static int cgid_server(void *data) + r = apr_pcalloc(ptrans, sizeof(request_rec)); + procnew = apr_pcalloc(ptrans, sizeof(*procnew)); + r->pool = ptrans; +- stat = get_req(sd2, r, &argv0, &env, &cgid_req); ++ stat = get_req(sd2, r, &argv0, &env, &errfileno, &cgid_req); + if (stat != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, stat, + main_server, APLOGNO(01248) +@@ -741,6 +846,16 @@ static int cgid_server(void *data) + continue; + } + ++ if (errfileno == 0) { ++ errfileno = STDERR_FILENO; ++ } ++ else { ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, main_server, ++ "using passed fd %d as stderr", errfileno); ++ /* Limit the received fd lifetime to pool lifetime */ ++ apr_pool_cleanup_register(ptrans, (void *)((long)errfileno), ++ close_unix_socket, close_unix_socket); ++ } + apr_os_file_put(&r->server->error_log, &errfileno, 0, r->pool); + apr_os_file_put(&inout, &sd2, 0, r->pool); + +@@ -800,7 +915,10 @@ static int cgid_server(void *data) + close(sd2); + } + else { +- apr_pool_userdata_set(r, ERRFN_USERDATA_KEY, apr_pool_cleanup_null, ptrans); ++ if (errfileno == STDERR_FILENO) { ++ /* Used by cgid_child_errfn without fd-passing. */ ++ apr_pool_userdata_set(r, ERRFN_USERDATA_KEY, apr_pool_cleanup_null, ptrans); ++ } + + argv = (const char * const *)create_argv(r->pool, NULL, NULL, NULL, argv0, r->args); + +@@ -1099,6 +1217,33 @@ static int log_scripterror(request_rec *r, cgid_server_conf * conf, int ret, + return ret; + } + ++/* Soak up stderr from a script and redirect it to the error log. ++ * TODO: log_scripterror() and this could move to cgi_common.h. 
*/ ++static apr_status_t log_script_err(request_rec *r, apr_file_t *script_err) ++{ ++ char argsbuffer[HUGE_STRING_LEN]; ++ char *newline; ++ apr_status_t rv; ++ cgid_server_conf *conf = ap_get_module_config(r->server->module_config, &cgid_module); ++ ++ while ((rv = apr_file_gets(argsbuffer, HUGE_STRING_LEN, ++ script_err)) == APR_SUCCESS) { ++ ++ newline = strchr(argsbuffer, '\n'); ++ if (newline) { ++ char *prev = newline - 1; ++ if (prev >= argsbuffer && *prev == '\r') { ++ newline = prev; ++ } ++ ++ *newline = '\0'; ++ } ++ log_scripterror(r, conf, r->status, 0, argsbuffer); ++ } ++ ++ return rv; ++} ++ + static int log_script(request_rec *r, cgid_server_conf * conf, int ret, + char *dbuf, const char *sbuf, apr_bucket_brigade *bb, + apr_file_t *script_err) +@@ -1204,6 +1349,13 @@ static int log_script(request_rec *r, cgid_server_conf * conf, int ret, + return ret; + } + ++/* Pull in CGI bucket implementation. */ ++#define cgi_server_conf cgid_server_conf ++#ifdef HAVE_CGID_FDPASSING ++#define WANT_CGI_BUCKET ++#endif ++#include "cgi_common.h" ++ + static int connect_to_daemon(int *sdptr, request_rec *r, + cgid_server_conf *conf) + { +@@ -1270,23 +1422,6 @@ static int connect_to_daemon(int *sdptr, request_rec *r, + return OK; + } + +-static void discard_script_output(apr_bucket_brigade *bb) +-{ +- apr_bucket *e; +- const char *buf; +- apr_size_t len; +- +- for (e = APR_BRIGADE_FIRST(bb); +- e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e); +- e = APR_BRIGADE_FIRST(bb)) +- { +- if (apr_bucket_read(e, &buf, &len, APR_BLOCK_READ)) { +- break; +- } +- apr_bucket_delete(e); +- } +-} +- + /**************************************************************** + * + * Actual cgid handling... +@@ -1391,6 +1526,7 @@ static apr_status_t cleanup_script(void *vptr) + + static int cgid_handler(request_rec *r) + { ++ conn_rec *c = r->connection; + int retval, nph, dbpos; + char *argv0, *dbuf; + apr_bucket_brigade *bb; +@@ -1400,10 +1536,11 @@ static int cgid_handler(request_rec *r) + int seen_eos, child_stopped_reading; + int sd; + char **env; +- apr_file_t *tempsock; ++ apr_file_t *tempsock, *script_err, *errpipe_out; + struct cleanup_script_info *info; + apr_status_t rv; + cgid_dirconf *dc; ++ apr_interval_time_t timeout; + + if (strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script")) { + return DECLINED; +@@ -1412,7 +1549,7 @@ static int cgid_handler(request_rec *r) + conf = ap_get_module_config(r->server->module_config, &cgid_module); + dc = ap_get_module_config(r->per_dir_config, &cgid_module); + +- ++ timeout = dc->timeout > 0 ? dc->timeout : r->server->timeout; + is_included = !strcmp(r->protocol, "INCLUDED"); + + if ((argv0 = strrchr(r->filename, '/')) != NULL) { +@@ -1465,6 +1602,17 @@ static int cgid_handler(request_rec *r) + } + */ + ++#ifdef HAVE_CGID_FDPASSING ++ rv = apr_file_pipe_create(&script_err, &errpipe_out, r->pool); ++ if (rv) { ++ return log_scripterror(r, conf, HTTP_SERVICE_UNAVAILABLE, rv, APLOGNO(10176) ++ "could not create pipe for stderr"); ++ } ++#else ++ script_err = NULL; ++ errpipe_out = NULL; ++#endif ++ + /* + * httpd core function used to add common environment variables like + * DOCUMENT_ROOT. 
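With HAVE_CGID_FDPASSING in play, the handler above keeps the read end of the stderr pipe and ships the raw fd of the write end to the daemon. A compressed sketch of that ownership pattern, assuming APR on Unix and a hypothetical make_stderr_pipe() helper:

    #include "apr_file_io.h"
    #include "apr_portable.h"

    /* Create the stderr pipe and expose the raw fd of the write end so
     * it can travel to the daemon via SCM_RIGHTS. The caller closes the
     * local write end once send_req() has delivered it, so that EOF on
     * *err_in tracks the exit of the CGI child. */
    static apr_status_t make_stderr_pipe(apr_pool_t *pool,
                                         apr_file_t **err_in,
                                         apr_file_t **err_out,
                                         int *errfd)
    {
        apr_status_t rv = apr_file_pipe_create(err_in, err_out, pool);
        if (rv == APR_SUCCESS)
            rv = apr_os_file_get(errfd, *err_out);
        return rv;
    }

Closing the write end locally is what the later "if (errpipe_out) apr_file_close(errpipe_out);" hunk does; without it, the read side would never see EOF.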
+@@ -1477,12 +1625,16 @@ static int cgid_handler(request_rec *r) + return retval; + } + +- rv = send_req(sd, r, argv0, env, CGI_REQ); ++ rv = send_req(sd, errpipe_out, r, argv0, env, CGI_REQ); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01268) + "write to cgi daemon process"); + } + ++ /* The write-end of the pipe is only used by the server, so close ++ * it here. */ ++ if (errpipe_out) apr_file_close(errpipe_out); ++ + info = apr_palloc(r->pool, sizeof(struct cleanup_script_info)); + info->conf = conf; + info->r = r; +@@ -1504,12 +1656,7 @@ static int cgid_handler(request_rec *r) + */ + + apr_os_pipe_put_ex(&tempsock, &sd, 1, r->pool); +- if (dc->timeout > 0) { +- apr_file_pipe_timeout_set(tempsock, dc->timeout); +- } +- else { +- apr_file_pipe_timeout_set(tempsock, r->server->timeout); +- } ++ apr_file_pipe_timeout_set(tempsock, timeout); + apr_pool_cleanup_kill(r->pool, (void *)((long)sd), close_unix_socket); + + /* Transfer any put/post args, CERN style... +@@ -1601,114 +1748,19 @@ static int cgid_handler(request_rec *r) + */ + shutdown(sd, 1); + +- /* Handle script return... */ +- if (!nph) { +- conn_rec *c = r->connection; +- const char *location; +- char sbuf[MAX_STRING_LEN]; +- int ret; +- +- bb = apr_brigade_create(r->pool, c->bucket_alloc); +- b = apr_bucket_pipe_create(tempsock, c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, b); +- b = apr_bucket_eos_create(c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, b); +- +- if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf, +- APLOG_MODULE_INDEX))) +- { +- ret = log_script(r, conf, ret, dbuf, sbuf, bb, NULL); +- +- /* +- * ret could be HTTP_NOT_MODIFIED in the case that the CGI script +- * does not set an explicit status and ap_meets_conditions, which +- * is called by ap_scan_script_header_err_brigade, detects that +- * the conditions of the requests are met and the response is +- * not modified. +- * In this case set r->status and return OK in order to prevent +- * running through the error processing stack as this would +- * break with mod_cache, if the conditions had been set by +- * mod_cache itself to validate a stale entity. +- * BTW: We circumvent the error processing stack anyway if the +- * CGI script set an explicit status code (whatever it is) and +- * the only possible values for ret here are: +- * +- * HTTP_NOT_MODIFIED (set by ap_meets_conditions) +- * HTTP_PRECONDITION_FAILED (set by ap_meets_conditions) +- * HTTP_INTERNAL_SERVER_ERROR (if something went wrong during the +- * processing of the response of the CGI script, e.g broken headers +- * or a crashed CGI process). +- */ +- if (ret == HTTP_NOT_MODIFIED) { +- r->status = ret; +- return OK; +- } +- +- return ret; +- } +- +- location = apr_table_get(r->headers_out, "Location"); +- +- if (location && location[0] == '/' && r->status == 200) { +- +- /* Soak up all the script output */ +- discard_script_output(bb); +- apr_brigade_destroy(bb); +- /* This redirect needs to be a GET no matter what the original +- * method was. +- */ +- r->method = "GET"; +- r->method_number = M_GET; +- +- /* We already read the message body (if any), so don't allow +- * the redirected request to think it has one. We can ignore +- * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR. 
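The block being removed here (and its twin deleted from mod_cgi.c earlier) implements one decision table for script redirects, which is why it could move into the shared cgi_handle_response(). A condensed restatement under a hypothetical name — not the literal shared code, and with the stdout/stderr draining elided:

    #include "httpd.h"
    #include "http_request.h"

    /* Script-redirect rules: only a 200 response with a Location header
     * counts; local URLs are replayed internally, external ones become
     * a client-visible 302. */
    static int cgi_handle_redirect(request_rec *r)
    {
        const char *location = apr_table_get(r->headers_out, "Location");

        if (!location || r->status != 200)
            return DECLINED;            /* not a script redirect */

        if (location[0] == '/') {
            /* Local URL: replay internally as a bodyless GET. */
            r->method = "GET";
            r->method_number = M_GET;
            apr_table_unset(r->headers_in, "Content-Length");
            ap_internal_redirect_handler(location, r);
            return OK;
        }

        /* External URL: 302, unless the script set an explicit
         * "Status:" header of its own. */
        return HTTP_MOVED_TEMPORARILY;
    }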
+- */ +- apr_table_unset(r->headers_in, "Content-Length"); +- +- ap_internal_redirect_handler(location, r); +- return OK; +- } +- else if (location && r->status == 200) { +- /* XXX: Note that if a script wants to produce its own Redirect +- * body, it now has to explicitly *say* "Status: 302" +- */ +- discard_script_output(bb); +- apr_brigade_destroy(bb); +- return HTTP_MOVED_TEMPORARILY; +- } +- +- rv = ap_pass_brigade(r->output_filters, bb); +- if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, +- "Failed to flush CGI output to client"); +- } +- } +- +- if (nph) { +- conn_rec *c = r->connection; +- struct ap_filter_t *cur; +- +- /* get rid of all filters up through protocol... since we +- * haven't parsed off the headers, there is no way they can +- * work +- */ +- +- cur = r->proto_output_filters; +- while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) { +- cur = cur->next; +- } +- r->output_filters = r->proto_output_filters = cur; +- +- bb = apr_brigade_create(r->pool, c->bucket_alloc); +- b = apr_bucket_pipe_create(tempsock, c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, b); +- b = apr_bucket_eos_create(c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, b); +- ap_pass_brigade(r->output_filters, bb); +- } ++ bb = apr_brigade_create(r->pool, c->bucket_alloc); ++#ifdef HAVE_CGID_FDPASSING ++ b = cgi_bucket_create(r, dc->timeout, tempsock, script_err, c->bucket_alloc); ++ if (b == NULL) ++ return HTTP_INTERNAL_SERVER_ERROR; /* should call log_scripterror() w/ _UNAVAILABLE? */ ++#else ++ b = apr_bucket_pipe_create(tempsock, c->bucket_alloc); ++#endif ++ APR_BRIGADE_INSERT_TAIL(bb, b); ++ b = apr_bucket_eos_create(c->bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(bb, b); + +- return OK; /* NOT r->status, even if it has changed. */ ++ return cgi_handle_response(r, nph, bb, timeout, conf, dbuf, script_err); + } + + +@@ -1825,7 +1877,7 @@ static int include_cmd(include_ctx_t *ctx, ap_filter_t *f, + return retval; + } + +- send_req(sd, r, command, env, SSI_REQ); ++ send_req(sd, NULL, r, command, env, SSI_REQ); + + info = apr_palloc(r->pool, sizeof(struct cleanup_script_info)); + info->conf = conf; diff --git a/httpd-2.4.43-r1861793+.patch b/httpd-2.4.43-r1861793+.patch new file mode 100644 index 0000000..08e96cb --- /dev/null +++ b/httpd-2.4.43-r1861793+.patch @@ -0,0 +1,271 @@ +diff --git a/configure.in b/configure.in +index cb43246..0bb6b0d 100644 +--- httpd-2.4.43/configure.in.r1861793+ ++++ httpd-2.4.43/configure.in +@@ -465,6 +465,28 @@ + AC_SEARCH_LIBS(crypt, crypt) + CRYPT_LIBS="$LIBS" + APACHE_SUBST(CRYPT_LIBS) ++ ++if test "$ac_cv_search_crypt" != "no"; then ++ # Test crypt() with the SHA-512 test vector from https://akkadia.org/drepper/SHA-crypt.txt ++ AC_CACHE_CHECK([whether crypt() supports SHA-2], [ap_cv_crypt_sha2], [ ++ AC_RUN_IFELSE([AC_LANG_PROGRAM([[ ++#include ++#include ++#include ++ ++#define PASSWD_0 "Hello world!" 
++#define SALT_0 "\$6\$saltstring" ++#define EXPECT_0 "\$6\$saltstring\$svn8UoSVapNtMuq1ukKS4tPQd8iKwSMHWjl/O817G3uBnIFNjnQJu" \ ++ "esI68u4OTLiBFdcbYEdFCoEOfaS35inz1" ++]], [char *result = crypt(PASSWD_0, SALT_0); ++ if (!result) return 1; ++ if (strcmp(result, EXPECT_0)) return 2; ++])], [ap_cv_crypt_sha2=yes], [ap_cv_crypt_sha2=no])]) ++ if test "$ap_cv_crypt_sha2" = yes; then ++ AC_DEFINE([HAVE_CRYPT_SHA2], 1, [Define if crypt() supports SHA-2 hashes]) ++ fi ++fi ++ + LIBS="$saved_LIBS" + + dnl See Comment #Spoon +--- httpd-2.4.43/docs/man/htpasswd.1.r1861793+ ++++ httpd-2.4.43/docs/man/htpasswd.1 +@@ -27,16 +27,16 @@ + .SH "SYNOPSIS" + + .PP +-\fB\fBhtpasswd\fR [ -\fBc\fR ] [ -\fBi\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBC\fR \fIcost\fR ] [ -\fBD\fR ] [ -\fBv\fR ] \fIpasswdfile\fR \fIusername\fR\fR ++\fB\fBhtpasswd\fR [ -\fBc\fR ] [ -\fBi\fR ] [ -\fBm\fR | -\fBB\fR | -\fB2\fR | -\fB5\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBr\fR \fIrounds\fR ] [ -\fBC\fR \fIcost\fR ] [ -\fBD\fR ] [ -\fBv\fR ] \fIpasswdfile\fR \fIusername\fR\fR + + .PP +-\fB\fBhtpasswd\fR -\fBb\fR [ -\fBc\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBC\fR \fIcost\fR ] [ -\fBD\fR ] [ -\fBv\fR ] \fIpasswdfile\fR \fIusername\fR \fIpassword\fR\fR ++\fB\fBhtpasswd\fR -\fBb\fR [ -\fBc\fR ] [ -\fBm\fR | -\fBB\fR | -\fB2\fR | -\fB5\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBr\fR \fIrounds\fR ] [ -\fBC\fR \fIcost\fR ] [ -\fBD\fR ] [ -\fBv\fR ] \fIpasswdfile\fR \fIusername\fR \fIpassword\fR\fR + + .PP +-\fB\fBhtpasswd\fR -\fBn\fR [ -\fBi\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBC\fR \fIcost\fR ] \fIusername\fR\fR ++\fB\fBhtpasswd\fR -\fBn\fR [ -\fBi\fR ] [ -\fBm\fR | -\fBB\fR | -\fB2\fR | -\fB5\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBr\fR \fIrounds\fR ] [ -\fBC\fR \fIcost\fR ] \fIusername\fR\fR + + .PP +-\fB\fBhtpasswd\fR -\fBnb\fR [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBC\fR \fIcost\fR ] \fIusername\fR \fIpassword\fR\fR ++\fB\fBhtpasswd\fR -\fBnb\fR [ -\fBm\fR | -\fBB\fR | -\fB2\fR | -\fB5\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBr\fR \fIrounds\fR ] [ -\fBC\fR \fIcost\fR ] \fIusername\fR \fIpassword\fR\fR + + + .SH "SUMMARY" +@@ -48,7 +48,7 @@ + Resources available from the Apache HTTP server can be restricted to just the users listed in the files created by \fBhtpasswd\fR\&. This program can only manage usernames and passwords stored in a flat-file\&. It can encrypt and display password information for use in other types of data stores, though\&. To use a DBM database see dbmmanage or htdbm\&. + + .PP +-\fBhtpasswd\fR encrypts passwords using either bcrypt, a version of MD5 modified for Apache, SHA1, or the system's \fBcrypt()\fR routine\&. Files managed by \fBhtpasswd\fR may contain a mixture of different encoding types of passwords; some user records may have bcrypt or MD5-encrypted passwords while others in the same file may have passwords encrypted with \fBcrypt()\fR\&. ++\fBhtpasswd\fR encrypts passwords using either bcrypt, a version of MD5 modified for Apache, SHA-1, or the system's \fBcrypt()\fR routine\&. SHA-2-based hashes (SHA-256 and SHA-512) are supported for \fBcrypt()\fR\&. Files managed by \fBhtpasswd\fR may contain a mixture of different encoding types of passwords; some user records may have bcrypt or MD5-encrypted passwords while others in the same file may have passwords encrypted with \fBcrypt()\fR\&. + + .PP + This manual page only lists the command line arguments\&. 
For details of the directives necessary to configure user authentication in httpd see the Apache manual, which is part of the Apache distribution or can be found at http://httpd\&.apache\&.org/\&. +@@ -73,17 +73,26 @@ + \fB-m\fR + Use MD5 encryption for passwords\&. This is the default (since version 2\&.2\&.18)\&. + .TP ++\fB-2\fR ++Use SHA-256 \fBcrypt()\fR based hashes for passwords\&. This is supported on most Unix platforms\&. ++.TP ++\fB-5\fR ++Use SHA-512 \fBcrypt()\fR based hashes for passwords\&. This is supported on most Unix platforms\&. ++.TP + \fB-B\fR + Use bcrypt encryption for passwords\&. This is currently considered to be very secure\&. + .TP + \fB-C\fR + This flag is only allowed in combination with \fB-B\fR (bcrypt encryption)\&. It sets the computing time used for the bcrypt algorithm (higher is more secure but slower, default: 5, valid: 4 to 17)\&. + .TP ++\fB-r\fR ++This flag is only allowed in combination with \fB-2\fR or \fB-5\fR\&. It sets the number of hash rounds used for the SHA-2 algorithms (higher is more secure but slower; the default is 5,000)\&. ++.TP + \fB-d\fR + Use \fBcrypt()\fR encryption for passwords\&. This is not supported by the httpd server on Windows and Netware\&. This algorithm limits the password length to 8 characters\&. This algorithm is \fBinsecure\fR by today's standards\&. It used to be the default algorithm until version 2\&.2\&.17\&. + .TP + \fB-s\fR +-Use SHA encryption for passwords\&. Facilitates migration from/to Netscape servers using the LDAP Directory Interchange Format (ldif)\&. This algorithm is \fBinsecure\fR by today's standards\&. ++Use SHA-1 (160-bit) encryption for passwords\&. Facilitates migration from/to Netscape servers using the LDAP Directory Interchange Format (ldif)\&. This algorithm is \fBinsecure\fR by today's standards\&. + .TP + \fB-p\fR + Use plaintext passwords\&. Though \fBhtpasswd\fR will support creation on all platforms, the httpd daemon will only accept plain text passwords on Windows and Netware\&. +@@ -152,10 +161,13 @@ + When using the \fBcrypt()\fR algorithm, note that only the first 8 characters of the password are used to form the password\&. If the supplied password is longer, the extra characters will be silently discarded\&. + + .PP +-The SHA encryption format does not use salting: for a given password, there is only one encrypted representation\&. The \fBcrypt()\fR and MD5 formats permute the representation by prepending a random salt string, to make dictionary attacks against the passwords more difficult\&. ++The SHA-1 encryption format does not use salting: for a given password, there is only one encrypted representation\&. The \fBcrypt()\fR and MD5 formats permute the representation by prepending a random salt string, to make dictionary attacks against the passwords more difficult\&. ++ ++.PP ++The SHA-1 and \fBcrypt()\fR formats are insecure by today's standards\&. + + .PP +-The SHA and \fBcrypt()\fR formats are insecure by today's standards\&. ++The SHA-2-based \fBcrypt()\fR formats (SHA-256 and SHA-512) are supported on most modern Unix systems, and follow the specification at https://www\&.akkadia\&.org/drepper/SHA-crypt\&.txt\&. + + .SH "RESTRICTIONS" + +--- httpd-2.4.43/support/htpasswd.c.r1861793+ ++++ httpd-2.4.43/support/htpasswd.c +@@ -109,17 +109,21 @@ + "for it." NL + " -i Read password from stdin without verification (for script usage)." NL + " -m Force MD5 encryption of the password (default)." NL +- " -B Force bcrypt encryption of the password (very secure)." 
NL ++ " -2 Force SHA-256 crypt() hash of the password (very secure)." NL ++ " -5 Force SHA-512 crypt() hash of the password (very secure)." NL ++ " -B Force bcrypt encryption of the password (very secure)." NL + " -C Set the computing time used for the bcrypt algorithm" NL + " (higher is more secure but slower, default: %d, valid: 4 to 17)." NL ++ " -r Set the number of rounds used for the SHA-256, SHA-512 algorithms" NL ++ " (higher is more secure but slower, default: 5000)." NL + " -d Force CRYPT encryption of the password (8 chars max, insecure)." NL +- " -s Force SHA encryption of the password (insecure)." NL ++ " -s Force SHA-1 encryption of the password (insecure)." NL + " -p Do not encrypt the password (plaintext, insecure)." NL + " -D Delete the specified user." NL + " -v Verify password for the specified user." NL + "On other systems than Windows and NetWare the '-p' flag will " + "probably not work." NL +- "The SHA algorithm does not use a salt and is less secure than the " ++ "The SHA-1 algorithm does not use a salt and is less secure than the " + "MD5 algorithm." NL, + BCRYPT_DEFAULT_COST + ); +@@ -178,7 +182,7 @@ + if (rv != APR_SUCCESS) + exit(ERR_SYNTAX); + +- while ((rv = apr_getopt(state, "cnmspdBbDiC:v", &opt, &opt_arg)) == APR_SUCCESS) { ++ while ((rv = apr_getopt(state, "cnmspdBbDi25C:r:v", &opt, &opt_arg)) == APR_SUCCESS) { + switch (opt) { + case 'c': + *mask |= APHTP_NEWFILE; +--- httpd-2.4.43/support/passwd_common.c.r1861793+ ++++ httpd-2.4.43/support/passwd_common.c +@@ -179,16 +179,21 @@ + int mkhash(struct passwd_ctx *ctx) + { + char *pw; +- char salt[16]; ++ char salt[17]; + apr_status_t rv; + int ret = 0; + #if CRYPT_ALGO_SUPPORTED + char *cbuf; + #endif ++#ifdef HAVE_CRYPT_SHA2 ++ const char *setting; ++ char method; ++#endif + +- if (ctx->cost != 0 && ctx->alg != ALG_BCRYPT) { ++ if (ctx->cost != 0 && ctx->alg != ALG_BCRYPT ++ && ctx->alg != ALG_CRYPT_SHA256 && ctx->alg != ALG_CRYPT_SHA512 ) { + apr_file_printf(errfile, +- "Warning: Ignoring -C argument for this algorithm." NL); ++ "Warning: Ignoring -C/-r argument for this algorithm." NL); + } + + if (ctx->passwd == NULL) { +@@ -246,6 +251,34 @@ + break; + #endif /* CRYPT_ALGO_SUPPORTED */ + ++#ifdef HAVE_CRYPT_SHA2 ++ case ALG_CRYPT_SHA256: ++ case ALG_CRYPT_SHA512: ++ ret = generate_salt(salt, 16, &ctx->errstr, ctx->pool); ++ if (ret != 0) ++ break; ++ ++ method = ctx->alg == ALG_CRYPT_SHA256 ? 
'5': '6'; ++ ++ if (ctx->cost) ++ setting = apr_psprintf(ctx->pool, "$%c$rounds=%d$%s", ++ method, ctx->cost, salt); ++ else ++ setting = apr_psprintf(ctx->pool, "$%c$%s", ++ method, salt); ++ ++ cbuf = crypt(pw, setting); ++ if (cbuf == NULL) { ++ rv = APR_FROM_OS_ERROR(errno); ++ ctx->errstr = apr_psprintf(ctx->pool, "crypt() failed: %pm", &rv); ++ ret = ERR_PWMISMATCH; ++ break; ++ } ++ ++ apr_cpystrn(ctx->out, cbuf, ctx->out_len - 1); ++ break; ++#endif /* HAVE_CRYPT_SHA2 */ ++ + #if BCRYPT_ALGO_SUPPORTED + case ALG_BCRYPT: + rv = apr_generate_random_bytes((unsigned char*)salt, 16); +@@ -294,6 +327,19 @@ + case 's': + ctx->alg = ALG_APSHA; + break; ++#ifdef HAVE_CRYPT_SHA2 ++ case '2': ++ ctx->alg = ALG_CRYPT_SHA256; ++ break; ++ case '5': ++ ctx->alg = ALG_CRYPT_SHA512; ++ break; ++#else ++ case '2': ++ case '5': ++ ctx->errstr = "SHA-2 crypt() algorithms are not supported on this platform."; ++ return ERR_ALG_NOT_SUPP; ++#endif + case 'p': + ctx->alg = ALG_PLAIN; + #if !PLAIN_ALGO_SUPPORTED +@@ -324,11 +370,12 @@ + return ERR_ALG_NOT_SUPP; + #endif + break; +- case 'C': { ++ case 'C': ++ case 'r': { + char *endptr; + long num = strtol(opt_arg, &endptr, 10); + if (*endptr != '\0' || num <= 0) { +- ctx->errstr = "argument to -C must be a positive integer"; ++ ctx->errstr = "argument to -C/-r must be a positive integer"; + return ERR_SYNTAX; + } + ctx->cost = num; +--- httpd-2.4.43/support/passwd_common.h.r1861793+ ++++ httpd-2.4.43/support/passwd_common.h +@@ -28,6 +28,8 @@ + #include "apu_version.h" + #endif + ++#include "ap_config_auto.h" ++ + #define MAX_STRING_LEN 256 + + #define ALG_PLAIN 0 +@@ -35,6 +37,8 @@ + #define ALG_APMD5 2 + #define ALG_APSHA 3 + #define ALG_BCRYPT 4 ++#define ALG_CRYPT_SHA256 5 ++#define ALG_CRYPT_SHA512 6 + + #define BCRYPT_DEFAULT_COST 5 + +@@ -84,7 +88,7 @@ + apr_size_t out_len; + char *passwd; + int alg; +- int cost; ++ int cost; /* cost for bcrypt, rounds for SHA-2 */ + enum { + PW_PROMPT = 0, + PW_ARG, diff --git a/httpd-2.4.43.tar.bz2 b/httpd-2.4.43.tar.bz2 deleted file mode 100644 index 1ca6123..0000000 Binary files a/httpd-2.4.43.tar.bz2 and /dev/null differ diff --git a/httpd-2.4.46-htcacheclean-dont-break.patch b/httpd-2.4.46-htcacheclean-dont-break.patch new file mode 100644 index 0000000..e52318a --- /dev/null +++ b/httpd-2.4.46-htcacheclean-dont-break.patch @@ -0,0 +1,13 @@ +diff --git a/support/htcacheclean.c b/support/htcacheclean.c +index 958ba6d..0a7fe3c 100644 +--- a/support/htcacheclean.c ++++ b/support/htcacheclean.c +@@ -557,8 +557,6 @@ static int list_urls(char *path, apr_pool_t *pool, apr_off_t round) + } + } + } +- +- break; + } + } + } diff --git a/httpd-2.4.43-lua-resume.patch b/httpd-2.4.46-lua-resume.patch similarity index 75% rename from httpd-2.4.43-lua-resume.patch rename to httpd-2.4.46-lua-resume.patch index c954338..1a22008 100644 --- a/httpd-2.4.43-lua-resume.patch +++ b/httpd-2.4.46-lua-resume.patch @@ -1,5 +1,27 @@ +diff --git a/modules/lua/config.m4 b/modules/lua/config.m4 +index 29fd563..abeba1c 100644 +--- a/modules/lua/config.m4 ++++ b/modules/lua/config.m4 +@@ -34,7 +34,7 @@ AC_DEFUN([CHECK_LUA_PATH], [dnl + fi + ]) + +-dnl Check for Lua 5.3/5.2/5.1 Libraries ++dnl Check for Lua Libraries + dnl CHECK_LUA(ACTION-IF-FOUND [, ACTION-IF-NOT-FOUND]) + dnl Sets: + dnl LUA_CFLAGS +@@ -44,7 +44,7 @@ AC_DEFUN([CHECK_LUA], + + AC_ARG_WITH( + lua, +- [AC_HELP_STRING([--with-lua=PATH],[Path to the Lua 5.3/5.2/5.1 prefix])], ++ [AC_HELP_STRING([--with-lua=PATH],[Path to the Lua installation prefix])], + lua_path="$withval", + 
:) + diff --git a/modules/lua/mod_lua.c b/modules/lua/mod_lua.c -index 05f1e44..be3bedf 100644 +index 05f1e44..18b628c 100644 --- a/modules/lua/mod_lua.c +++ b/modules/lua/mod_lua.c @@ -342,7 +342,7 @@ static apr_status_t lua_setup_filter_ctx(ap_filter_t* f, request_rec* r, lua_fil @@ -34,7 +56,7 @@ index 05f1e44..be3bedf 100644 /* If Lua yielded, it means we have something to pass on */ - if (lua_resume(L, 0) == LUA_YIELD) { -+ if (lua_resume(L, 0, &nres) == LUA_YIELD) { ++ if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) { size_t olen; const char* output = lua_tolstring(L, 1, &olen); if (olen > 0) { @@ -43,7 +65,7 @@ index 05f1e44..be3bedf 100644 lua_pushnil(L); lua_setglobal(L, "bucket"); - if (lua_resume(L, 0) == LUA_YIELD) { -+ if (lua_resume(L, 0, &nres) == LUA_YIELD) { ++ if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) { apr_bucket *pbktOut; size_t olen; const char* output = lua_tolstring(L, 1, &olen); @@ -61,7 +83,7 @@ index 05f1e44..be3bedf 100644 /* If Lua yielded, it means we have something to pass on */ - if (lua_resume(L, 0) == LUA_YIELD) { -+ if (lua_resume(L, 0, &nres) == LUA_YIELD) { ++ if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) { size_t olen; const char* output = lua_tolstring(L, 1, &olen); pbktOut = apr_bucket_heap_create(output, olen, 0, c->bucket_alloc); @@ -70,15 +92,15 @@ index 05f1e44..be3bedf 100644 lua_pushnil(L); lua_setglobal(L, "bucket"); - if (lua_resume(L, 0) == LUA_YIELD) { -+ if (lua_resume(L, 0, &nres) == LUA_YIELD) { ++ if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) { apr_bucket *pbktOut; size_t olen; const char* output = lua_tolstring(L, 1, &olen); diff --git a/modules/lua/mod_lua.h b/modules/lua/mod_lua.h -index 0e49cdc..8921b87 100644 +index 0e49cdc..72b4de7 100644 --- a/modules/lua/mod_lua.h +++ b/modules/lua/mod_lua.h -@@ -48,7 +48,13 @@ +@@ -48,7 +48,15 @@ #if LUA_VERSION_NUM > 501 /* Load mode for lua_load() */ #define lua_load(a,b,c,d) lua_load(a,b,c,d,NULL) @@ -87,7 +109,9 @@ index 0e49cdc..8921b87 100644 +#if LUA_VERSION_NUM > 503 +#define lua_resume(a,b,c) lua_resume(a, NULL, b, c) +#else -+#define lua_resume(a,b,c) lua_resume(a, NULL, b) ++/* ### For version < 5.4, assume that exactly one stack item is on the ++ * stack, which is what the code did before but seems dubious. 
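The compatibility shim being introduced here exists because lua_resume() changed shape across Lua releases. For reference, the signatures being reconciled (per the Lua reference manuals) and the resulting call pattern, shown as a small sketch that assumes the patched macro is in scope:

    #include <lua.h>

    /* lua_resume() across the versions mod_lua supports:
     *   5.1:     int lua_resume(lua_State *L, int narg);
     *   5.2/5.3: int lua_resume(lua_State *L, lua_State *from, int narg);
     *   5.4:     int lua_resume(lua_State *L, lua_State *from, int narg,
     *                           int *nresults);
     * With the macro in place, callers are written against the 5.4
     * shape and check the yielded result count explicitly. */
    static void drain_yield(lua_State *L)
    {
        int nres;
        if (lua_resume(L, 0, &nres) == LUA_YIELD && nres == 1) {
            size_t olen;
            const char *output = lua_tolstring(L, 1, &olen);
            (void)output; (void)olen;   /* ...wrap in a heap bucket... */
        }
    }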
*/ ++#define lua_resume(a,b,c) (*(c) = 1, lua_resume(a, NULL, b)) +#endif + #define luaL_setfuncs_compat(a,b) luaL_setfuncs(a,b,0) diff --git a/httpd-2.4.46.tar.bz2 b/httpd-2.4.46.tar.bz2 new file mode 100644 index 0000000..ee55f07 Binary files /dev/null and b/httpd-2.4.46.tar.bz2 differ diff --git a/httpd.spec b/httpd.spec index a92c893..09aa627 100644 --- a/httpd.spec +++ b/httpd.spec @@ -7,8 +7,8 @@ Name: httpd Summary: Apache HTTP Server -Version: 2.4.43 -Release: 4 +Version: 2.4.46 +Release: 1 License: ASL 2.0 URL: https://httpd.apache.org/ Source0: https://archive.apache.org/dist/httpd/httpd-%{version}.tar.bz2 @@ -54,7 +54,7 @@ Patch0: httpd-2.4.1-apctl.patch Patch1: httpd-2.4.9-apxs.patch Patch2: httpd-2.4.1-deplibs.patch Patch3: httpd-2.4.3-apctl-systemd.patch -Patch4: httpd-2.4.25-detect-systemd.patch +Patch4: httpd-2.4.43-detect-systemd.patch Patch5: httpd-2.4.33-export.patch Patch6: httpd-2.4.1-corelimit.patch Patch7: httpd-2.4.25-selinux.patch @@ -65,10 +65,11 @@ Patch11: httpd-2.4.34-sslciphdefault.patch Patch12: httpd-2.4.34-sslprotdefault.patch Patch13: httpd-2.4.34-enable-sslv3.patch Patch14: layout_add_openEuler.patch -Patch15: httpd-2.4.43-lua-resume.patch -Patch16: CVE-2020-11984.patch -Patch17: CVE-2020-11993.patch -Patch18: CVE-2020-9490.patch +Patch15: httpd-2.4.46-lua-resume.patch +Patch16: httpd-2.4.43-gettid.patch +Patch17: httpd-2.4.43-r1861793+.patch +Patch18: httpd-2.4.43-r1828172+.patch +Patch19: httpd-2.4.46-htcacheclean-dont-break.patch BuildRequires: gcc autoconf pkgconfig findutils xmlto perl-interpreter perl-generators systemd-devel BuildRequires: zlib-devel libselinux-devel lua-devel brotli-devel @@ -355,10 +356,6 @@ exit 0 %postun %systemd_postun httpd.service htcacheclean.service httpd.socket -%triggerun -- httpd < 2.2.21-5 -/usr/bin/systemd-sysv-convert --save httpd.service >/dev/null 2>&1 ||: -/sbin/chkconfig --del httpd >/dev/null 2>&1 || : - %posttrans test -f /etc/sysconfig/httpd-disable-posttrans || \ /bin/systemctl try-restart --no-block httpd.service htcacheclean.service >/dev/null 2>&1 || : @@ -505,6 +502,12 @@ exit $rv %{_rpmconfigdir}/macros.d/macros.httpd %changelog +* Tue Jan 26 2021 xihaochen - 2.4.46-1 +- Type:requirements +- ID:NA +- SUG:NA +- DESC: update httpd to 2.4.46 + * Sun Sep 27 2020 yuboyun - 2.4.43-4 - Type:cves - ID:CVE-2020-9490 CVE-2020-11984 CVE-2020-11993
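For reference, the setting-string convention generated by the new -2/-5/-r options in the r1861793+ patch can be exercised directly. A standalone sketch, assuming a glibc-style crypt() (link with -lcrypt) and reusing the same Drepper test vector the configure probe checks; the fixed salt is for demonstration only, since mkhash() generates a random 16-character salt per password:

    #include <stdio.h>
    #include <crypt.h>   /* glibc; other platforms declare crypt() in unistd.h */

    int main(void)
    {
        /* "$6$" selects SHA-512 ("$5$" would be SHA-256); with no
         * "rounds=" field, the default of 5000 rounds applies. */
        const char *hash = crypt("Hello world!", "$6$saltstring");
        if (!hash)
            return 1;
        puts(hash);   /* matches EXPECT_0 from the configure.in probe */

        /* Explicit work factor, as "htpasswd -5 -r 10000" would emit. */
        hash = crypt("Hello world!", "$6$rounds=10000$saltstring");
        return hash ? 0 : 1;
    }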