author     tron <tron>                                 2011-09-14 18:03:18 +0000
committer  tron <tron>                                 2011-09-14 18:03:18 +0000
commit     3906b3624a8207e8044bad0d77f985eab3d178fa (patch)
tree       599117314aad22ad3ea2e7ba42e72eceaf68771b
parent     6abea35e8aa1325cc250e66ebe9698bf8f9a6881 (diff)
Pullup ticket #3526 - requested by taca
www/apache22: security update

Revisions pulled up:
- www/apache22/Makefile                         1.68-1.70
- www/apache22/distinfo                         1.40-1.42
- www/apache22/patches/patch-CVE-2011-3192      deleted
- www/apache22/patches/patch-lock.c             1.1
- www/apache22/patches/patch-repos.c            1.1
---
Module Name:    pkgsrc
Committed By:   tron
Date:           Wed Aug 31 12:52:45 UTC 2011

Modified Files:
        pkgsrc/www/apache22: Makefile distinfo
Removed Files:
        pkgsrc/www/apache22/patches: patch-CVE-2011-3192

Log Message:
Update "apache22" package to version 2.2.20. Changes since version 2.2.19:

- mod_authnz_ldap: If the LDAP server returns constraint violation,
  don't treat this as an error but as "auth denied". [Stefan Fritsch]
- mod_filter: Fix FilterProvider conditions of type "resp=" (response
  headers) for CGI. [Joe Orton, Rainer Jung]
- mod_reqtimeout: Fix a timed out connection going into the keep-alive
  state after a timeout when discarding a request body. Bug 51103.
  [Stefan Fritsch]
- core: Do the hook sorting earlier so that the hooks are properly
  sorted for the pre_config hook and during parsing the config.
  [Stefan Fritsch]
---
Module Name:    pkgsrc
Committed By:   sborrill
Date:           Mon Sep 12 17:18:46 UTC 2011

Modified Files:
        pkgsrc/www/apache22: Makefile distinfo
Added Files:
        pkgsrc/www/apache22/patches: patch-lock.c patch-repos.c

Log Message:
Atomically create files when using DAV to stop files being deleted on error
From: https://issues.apache.org/bugzilla/show_bug.cgi?id=39815

Bump PKGREVISION.

OK tron@
---
Module Name:    pkgsrc
Committed By:   taca
Date:           Wed Sep 14 07:10:21 UTC 2011

Modified Files:
        pkgsrc/www/apache22: Makefile distinfo

Log Message:
Update apache22 package to 2.2.21.

Quote from release announce:

  The Apache Software Foundation and the Apache HTTP Server Project are
  pleased to announce the release of version 2.2.21 of the Apache HTTP
  Server ("Apache"). This version of Apache is principally a security
  and bug fix release:

  * SECURITY: CVE-2011-3348 (cve.mitre.org)
    mod_proxy_ajp when combined with mod_proxy_balancer: Prevents
    unrecognized HTTP methods from marking ajp: balancer members in an
    error state, avoiding denial of service.

  * SECURITY: CVE-2011-3192 (cve.mitre.org)
    core: Further fixes to the handling of byte-range requests to use
    less memory, to avoid denial of service. This patch includes fixes
    to the patch introduced in release 2.2.20 for protocol compliance,
    as well as the MaxRanges directive.

  Note that further advisories on the state of CVE-2011-3192 will no
  longer be broadcast, but will be kept up to date at:
  http://httpd.apache.org/security/CVE-2011-3192.txt

  We consider this release to be the best version of Apache available,
  and encourage users of all prior versions to upgrade.
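The CVE-2011-3192 hardening amounts to parsing the whole Range header up
front, capping how many ranges a client may request (the new MaxRanges
directive), and ignoring the header when the requested ranges add up to at
least the full file. The fragment below is only a simplified sketch of that
idea in plain C; it is not httpd's byterange_filter.c, and the MAX_RANGES
constant and count_sane_ranges() helper are invented for illustration.

    /*
     * Simplified sketch of the byte-range sanity checks behind the
     * CVE-2011-3192 fix: cap the number of ranges and ignore the header
     * if the ranges cover the whole representation.  Not httpd code.
     */
    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_RANGES 200          /* invented cap, in the spirit of MaxRanges */

    /* Return the number of usable ranges in a "bytes=..." header, or 0 if the
     * header should be ignored (malformed, too many ranges, or the ranges are
     * not smaller than the file). */
    static int count_sane_ranges(const char *header, long clength)
    {
        long sum = 0;
        int n = 0;
        char *copy, *cur, *save;

        if (strncmp(header, "bytes=", 6) != 0)
            return 0;
        copy = strdup(header + 6);
        if (copy == NULL)
            return 0;

        for (cur = strtok_r(copy, ",", &save); cur != NULL;
             cur = strtok_r(NULL, ",", &save)) {
            char *dash = strchr(cur, '-');
            long start, end;

            if (dash == NULL) { n = 0; break; }
            if (dash == cur) {                  /* "-N": the final N bytes */
                start = clength - atol(dash + 1);
                end = clength - 1;
            } else {
                start = atol(cur);
                end = *(dash + 1) ? atol(dash + 1) : clength - 1;
            }
            if (start < 0) start = 0;
            if (end >= clength) end = clength - 1;
            if (start > end || ++n > MAX_RANGES) { n = 0; break; }
            sum += end - start + 1;
        }
        free(copy);

        /* Same idea as the deleted patch below: if the ranges are not
         * smaller than the file, serving them as ranges only wastes memory. */
        return (sum < clength) ? n : 0;
    }

    int main(void)
    {
        printf("%d\n", count_sane_ranges("bytes=0-99,200-299", 1000));  /* 2 */
        printf("%d\n", count_sane_ranges("bytes=0-499,500-999", 1000)); /* 0 */
        return 0;
    }

In httpd itself the equivalent logic lives in ap_set_byterange() in
modules/http/byterange_filter.c; the deleted patch-CVE-2011-3192 below shows
the interim version of that code which this update supersedes.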
-rw-r--r--  www/apache22/Makefile                        6
-rw-r--r--  www/apache22/distinfo                       11
-rw-r--r--  www/apache22/patches/patch-CVE-2011-3192   604
-rw-r--r--  www/apache22/patches/patch-lock.c           58
-rw-r--r--  www/apache22/patches/patch-repos.c          99
5 files changed, 166 insertions(+), 612 deletions(-)
diff --git a/www/apache22/Makefile b/www/apache22/Makefile
index dd14513d60c..8b41a98e3b3 100644
--- a/www/apache22/Makefile
+++ b/www/apache22/Makefile
@@ -1,7 +1,7 @@
-# $NetBSD: Makefile,v 1.66.2.1 2011/08/30 08:10:22 sbd Exp $
-DISTNAME= httpd-2.2.19
-PKGREVISION= 1
+# $NetBSD: Makefile,v 1.66.2.2 2011/09/14 18:03:18 tron Exp $
+
+DISTNAME= httpd-2.2.21
PKGNAME= ${DISTNAME:S/httpd/apache/}
CATEGORIES= www
MASTER_SITES= ${MASTER_SITE_APACHE:=httpd/} \
diff --git a/www/apache22/distinfo b/www/apache22/distinfo
index 4e937a763ea..3f596ea2a3b 100644
--- a/www/apache22/distinfo
+++ b/www/apache22/distinfo
@@ -1,9 +1,8 @@
-$NetBSD: distinfo,v 1.38.2.1 2011/08/30 08:10:22 sbd Exp $
+$NetBSD: distinfo,v 1.38.2.2 2011/09/14 18:03:18 tron Exp $

-SHA1 (httpd-2.2.19.tar.bz2) = 5676da63f3203129287d7c09a16cf523c00ec6cf
-RMD160 (httpd-2.2.19.tar.bz2) = faa901121776092604d2f4ad61366e1a1fabaefd
-Size (httpd-2.2.19.tar.bz2) = 5322082 bytes
-SHA1 (patch-CVE-2011-3192) = b4762ca0682a1dc388da9d030a8866635bc4ebd7
+SHA1 (httpd-2.2.21.tar.bz2) = c02f9b05da9a7e316ff37d9053dc76a57ba51cb4
+RMD160 (httpd-2.2.21.tar.bz2) = 6464a03d78ab858b1288ea9eef4cd5f73b60a9f1
+Size (httpd-2.2.21.tar.bz2) = 5324905 bytes
SHA1 (patch-aa) = e0bfdf6bc9cb034bea46a390a12a5508e363c9a7
SHA1 (patch-ab) = 365cc3b0ac2d9d68ccb94f5699fe168a1c9b0150
SHA1 (patch-ac) = 515043b5c215d49fe8f6d3191b502c978e2a2dad
@@ -15,3 +14,5 @@ SHA1 (patch-ai) = 4ebc3bd580a298973928eb6d13d2ce745eac0312
SHA1 (patch-al) = 56b9f5c2f6fd01fe5067f9210e328cbf674c68f1
SHA1 (patch-am) = ab4a2f7e5a1a3064e908b61157e7fd349c0b0c08
SHA1 (patch-aw) = ca53d67beeb2c2c4d9adb04d3d79e24a8c427fd4
+SHA1 (patch-lock.c) = 770ca03f1cb4421879bd5baa5a7c30cc91acb6e1
+SHA1 (patch-repos.c) = 0e0361b91d4b0fe6c7c55a12fdfd2e6aacc710e1
diff --git a/www/apache22/patches/patch-CVE-2011-3192 b/www/apache22/patches/patch-CVE-2011-3192
deleted file mode 100644
index b954e935c65..00000000000
--- a/www/apache22/patches/patch-CVE-2011-3192
+++ /dev/null
@@ -1,604 +0,0 @@
-$NetBSD: patch-CVE-2011-3192,v 1.1.2.2 2011/08/30 08:10:22 sbd Exp $
-
-Fix DoS vulnerability reported in patch-CVE-2011-3192. Patch taken
-from the Apache SVN repository:
-
-http://svn.apache.org/viewvc/httpd/httpd/branches/2.2.x/modules/http/byterange_filter.c?view=log
-
---- modules/http/byterange_filter.c.orig 2010-02-26 09:32:15.000000000 +0000
-+++ modules/http/byterange_filter.c 2011-08-29 22:54:16.000000000 +0100
-@@ -55,65 +55,8 @@
- #include <unistd.h>
- #endif
-
--static int parse_byterange(char *range, apr_off_t clength,
-- apr_off_t *start, apr_off_t *end)
--{
-- char *dash = strchr(range, '-');
-- char *errp;
-- apr_off_t number;
--
-- if (!dash) {
-- return 0;
-- }
--
-- if ((dash == range)) {
-- /* In the form "-5" */
-- if (apr_strtoff(&number, dash+1, &errp, 10) || *errp) {
-- return 0;
-- }
-- *start = clength - number;
-- *end = clength - 1;
-- }
-- else {
-- *dash++ = '\0';
-- if (apr_strtoff(&number, range, &errp, 10) || *errp) {
-- return 0;
-- }
-- *start = number;
-- if (*dash) {
-- if (apr_strtoff(&number, dash, &errp, 10) || *errp) {
-- return 0;
-- }
-- *end = number;
-- }
-- else { /* "5-" */
-- *end = clength - 1;
-- }
-- }
--
-- if (*start < 0) {
-- *start = 0;
-- }
--
-- if (*end >= clength) {
-- *end = clength - 1;
-- }
--
-- if (*start > *end) {
-- return -1;
-- }
--
-- return (*start > 0 || *end < clength);
--}
--
--static int ap_set_byterange(request_rec *r);
--
--typedef struct byterange_ctx {
-- apr_bucket_brigade *bb;
-- int num_ranges;
-- char *boundary;
-- char *bound_head;
--} byterange_ctx;
-+static int ap_set_byterange(request_rec *r, apr_off_t clength,
-+ apr_array_header_t **indexes);
-
- /*
- * Here we try to be compatible with clients that want multipart/x-byteranges
-@@ -131,28 +74,200 @@
- }
-
- #define BYTERANGE_FMT "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT "/%" APR_OFF_T_FMT
--#define PARTITION_ERR_FMT "apr_brigade_partition() failed " \
-- "[%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]"
-+
-+static apr_status_t copy_brigade_range(apr_bucket_brigade *bb,
-+ apr_bucket_brigade *bbout,
-+ apr_off_t start,
-+ apr_off_t end)
-+{
-+ apr_bucket *first = NULL, *last = NULL, *out_first = NULL, *e;
-+ apr_uint64_t pos = 0, off_first = 0, off_last = 0;
-+ apr_status_t rv;
-+ const char *s;
-+ apr_size_t len;
-+ apr_uint64_t start64, end64;
-+ apr_off_t pofft = 0;
-+
-+ /*
-+ * Once we know that start and end are >= 0 convert everything to apr_uint64_t.
-+ * See the comments in apr_brigade_partition why.
-+ * In short apr_off_t (for values >= 0)and apr_size_t fit into apr_uint64_t.
-+ */
-+ start64 = (apr_uint64_t)start;
-+ end64 = (apr_uint64_t)end;
-+
-+ if (start < 0 || end < 0 || start64 > end64)
-+ return APR_EINVAL;
-+
-+ for (e = APR_BRIGADE_FIRST(bb);
-+ e != APR_BRIGADE_SENTINEL(bb);
-+ e = APR_BUCKET_NEXT(e))
-+ {
-+ apr_uint64_t elen64;
-+ /* we know that no bucket has undefined length (-1) */
-+ AP_DEBUG_ASSERT(e->length != (apr_size_t)(-1));
-+ elen64 = (apr_uint64_t)e->length;
-+ if (!first && (elen64 + pos > start64)) {
-+ first = e;
-+ off_first = pos;
-+ }
-+ if (elen64 + pos > end64) {
-+ last = e;
-+ off_last = pos;
-+ break;
-+ }
-+ pos += elen64;
-+ }
-+ if (!first || !last)
-+ return APR_EINVAL;
-+
-+ e = first;
-+ while (1)
-+ {
-+ apr_bucket *copy;
-+ AP_DEBUG_ASSERT(e != APR_BRIGADE_SENTINEL(bb));
-+ rv = apr_bucket_copy(e, &copy);
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+
-+ APR_BRIGADE_INSERT_TAIL(bbout, copy);
-+ if (e == first) {
-+ if (off_first != start64) {
-+ rv = apr_bucket_split(copy, (apr_size_t)(start64 - off_first));
-+ if (rv == APR_ENOTIMPL) {
-+ rv = apr_bucket_read(copy, &s, &len, APR_BLOCK_READ);
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+ /*
-+ * The read above might have morphed copy in a bucket
-+ * of shorter length. So read and delete until we reached
-+ * the correct bucket for splitting.
-+ */
-+ while (start64 - off_first > (apr_uint64_t)copy->length) {
-+ apr_bucket *tmp = APR_BUCKET_NEXT(copy);
-+ off_first += (apr_uint64_t)copy->length;
-+ APR_BUCKET_REMOVE(copy);
-+ apr_bucket_destroy(copy);
-+ copy = tmp;
-+ rv = apr_bucket_read(copy, &s, &len, APR_BLOCK_READ);
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+ }
-+ if (start64 > off_first) {
-+ rv = apr_bucket_split(copy, (apr_size_t)(start64 - off_first));
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+ }
-+ else {
-+ copy = APR_BUCKET_PREV(copy);
-+ }
-+ }
-+ else if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+ out_first = APR_BUCKET_NEXT(copy);
-+ APR_BUCKET_REMOVE(copy);
-+ apr_bucket_destroy(copy);
-+ }
-+ else {
-+ out_first = copy;
-+ }
-+ }
-+ if (e == last) {
-+ if (e == first) {
-+ off_last += start64 - off_first;
-+ copy = out_first;
-+ }
-+ if (end64 - off_last != (apr_uint64_t)e->length) {
-+ rv = apr_bucket_split(copy, (apr_size_t)(end64 + 1 - off_last));
-+ if (rv == APR_ENOTIMPL) {
-+ rv = apr_bucket_read(copy, &s, &len, APR_BLOCK_READ);
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+ /*
-+ * The read above might have morphed copy in a bucket
-+ * of shorter length. So read until we reached
-+ * the correct bucket for splitting.
-+ */
-+ while (end64 + 1 - off_last > (apr_uint64_t)copy->length) {
-+ off_last += (apr_uint64_t)copy->length;
-+ copy = APR_BUCKET_NEXT(copy);
-+ rv = apr_bucket_read(copy, &s, &len, APR_BLOCK_READ);
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+ }
-+ if (end64 < off_last + (apr_uint64_t)copy->length - 1) {
-+ rv = apr_bucket_split(copy, end64 + 1 - off_last);
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+ }
-+ }
-+ else if (rv != APR_SUCCESS) {
-+ apr_brigade_cleanup(bbout);
-+ return rv;
-+ }
-+ copy = APR_BUCKET_NEXT(copy);
-+ if (copy != APR_BRIGADE_SENTINEL(bbout)) {
-+ APR_BUCKET_REMOVE(copy);
-+ apr_bucket_destroy(copy);
-+ }
-+ }
-+ break;
-+ }
-+ e = APR_BUCKET_NEXT(e);
-+ }
-+
-+ AP_DEBUG_ASSERT(APR_SUCCESS == apr_brigade_length(bbout, 1, &pofft));
-+ pos = (apr_uint64_t)pofft;
-+ AP_DEBUG_ASSERT(pos == end64 - start64 + 1);
-+ return APR_SUCCESS;
-+}
-+
-+typedef struct indexes_t {
-+ apr_off_t start;
-+ apr_off_t end;
-+} indexes_t;
-
- AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
- apr_bucket_brigade *bb)
- {
--#define MIN_LENGTH(len1, len2) ((len1 > len2) ? len2 : len1)
- request_rec *r = f->r;
- conn_rec *c = r->connection;
-- byterange_ctx *ctx;
- apr_bucket *e;
- apr_bucket_brigade *bsend;
-+ apr_bucket_brigade *tmpbb;
- apr_off_t range_start;
- apr_off_t range_end;
-- char *current;
- apr_off_t clength = 0;
- apr_status_t rv;
- int found = 0;
- int num_ranges;
--
-- /* Iterate through the brigade until reaching EOS or a bucket with
-- * unknown length. */
-+ char *boundary = NULL;
-+ char *bound_head = NULL;
-+ apr_array_header_t *indexes;
-+ indexes_t *idx;
-+ int original_status;
-+ int i;
-+
-+ /*
-+ * Iterate through the brigade until reaching EOS or a bucket with
-+ * unknown length.
-+ */
- for (e = APR_BRIGADE_FIRST(bb);
- (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)
- && e->length != (apr_size_t)-1);
-@@ -160,90 +275,80 @@
- clength += e->length;
- }
-
-- /* Don't attempt to do byte range work if this brigade doesn't
-+ /*
-+ * Don't attempt to do byte range work if this brigade doesn't
- * contain an EOS, or if any of the buckets has an unknown length;
- * this avoids the cases where it is expensive to perform
-- * byteranging (i.e. may require arbitrary amounts of memory). */
-+ * byteranging (i.e. may require arbitrary amounts of memory).
-+ */
- if (!APR_BUCKET_IS_EOS(e) || clength <= 0) {
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, bb);
- }
-
-- num_ranges = ap_set_byterange(r);
-+ original_status = r->status;
-+ num_ranges = ap_set_byterange(r, clength, &indexes);
-
- /* We have nothing to do, get out of the way. */
- if (num_ranges == 0) {
-+ r->status = original_status;
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, bb);
- }
-
-- ctx = apr_pcalloc(r->pool, sizeof(*ctx));
-- ctx->num_ranges = num_ranges;
-- /* create a brigade in case we never call ap_save_brigade() */
-- ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc);
--
-- if (ctx->num_ranges > 1) {
-+ if (num_ranges > 1) {
- /* Is ap_make_content_type required here? */
- const char *orig_ct = ap_make_content_type(r, r->content_type);
-- ctx->boundary = apr_psprintf(r->pool, "%" APR_UINT64_T_HEX_FMT "%lx",
-- (apr_uint64_t)r->request_time, (long) getpid());
-+ boundary = apr_psprintf(r->pool, "%" APR_UINT64_T_HEX_FMT "%lx",
-+ (apr_uint64_t)r->request_time, (long) getpid());
-
- ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
- use_range_x(r) ? "/x-" : "/",
- "byteranges; boundary=",
-- ctx->boundary, NULL));
-+ boundary, NULL));
-
- if (strcasecmp(orig_ct, NO_CONTENT_TYPE)) {
-- ctx->bound_head = apr_pstrcat(r->pool,
-- CRLF "--", ctx->boundary,
-- CRLF "Content-type: ",
-- orig_ct,
-- CRLF "Content-range: bytes ",
-- NULL);
-+ bound_head = apr_pstrcat(r->pool,
-+ CRLF "--", boundary,
-+ CRLF "Content-type: ",
-+ orig_ct,
-+ CRLF "Content-range: bytes ",
-+ NULL);
- }
- else {
- /* if we have no type for the content, do our best */
-- ctx->bound_head = apr_pstrcat(r->pool,
-- CRLF "--", ctx->boundary,
-- CRLF "Content-range: bytes ",
-- NULL);
-+ bound_head = apr_pstrcat(r->pool,
-+ CRLF "--", boundary,
-+ CRLF "Content-range: bytes ",
-+ NULL);
- }
-- ap_xlate_proto_to_ascii(ctx->bound_head, strlen(ctx->bound_head));
-+ ap_xlate_proto_to_ascii(bound_head, strlen(bound_head));
- }
-
- /* this brigade holds what we will be sending */
- bsend = apr_brigade_create(r->pool, c->bucket_alloc);
-+ tmpbb = apr_brigade_create(r->pool, c->bucket_alloc);
-
-- while ((current = ap_getword(r->pool, &r->range, ','))
-- && (rv = parse_byterange(current, clength, &range_start,
-- &range_end))) {
-- apr_bucket *e2;
-- apr_bucket *ec;
-+ idx = (indexes_t *)indexes->elts;
-+ for (i = 0; i < indexes->nelts; i++, idx++) {
-+ range_start = idx->start;
-+ range_end = idx->end;
-
-- if (rv == -1) {
-- continue;
-- }
--
-- /* These calls to apr_brigage_partition should only fail in
-- * pathological cases, e.g. a file being truncated whilst
-- * being served. */
-- if ((rv = apr_brigade_partition(bb, range_start, &ec)) != APR_SUCCESS) {
-+ rv = copy_brigade_range(bb, tmpbb, range_start, range_end);
-+ if (rv != APR_SUCCESS ) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
-- PARTITION_ERR_FMT, range_start, clength);
-+ "copy_brigade_range() failed [%" APR_OFF_T_FMT
-+ "-%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]",
-+ range_start, range_end, clength);
- continue;
- }
-- if ((rv = apr_brigade_partition(bb, range_end+1, &e2)) != APR_SUCCESS) {
-- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
-- PARTITION_ERR_FMT, range_end+1, clength);
-- continue;
-- }
--
- found = 1;
-
-- /* For single range requests, we must produce Content-Range header.
-+ /*
-+ * For single range requests, we must produce Content-Range header.
- * Otherwise, we need to produce the multipart boundaries.
- */
-- if (ctx->num_ranges == 1) {
-+ if (num_ranges == 1) {
- apr_table_setn(r->headers_out, "Content-Range",
- apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
- range_start, range_end, clength));
-@@ -251,7 +356,7 @@
- else {
- char *ts;
-
-- e = apr_bucket_pool_create(ctx->bound_head, strlen(ctx->bound_head),
-+ e = apr_bucket_pool_create(bound_head, strlen(bound_head),
- r->pool, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bsend, e);
-
-@@ -263,23 +368,19 @@
- APR_BRIGADE_INSERT_TAIL(bsend, e);
- }
-
-- do {
-- apr_bucket *foo;
-- const char *str;
-- apr_size_t len;
--
-- if (apr_bucket_copy(ec, &foo) != APR_SUCCESS) {
-- /* As above; this should not fail since the bucket has
-- * a known length, but just to be sure, this takes
-- * care of uncopyable buckets that do somehow manage
-- * to slip through. */
-- /* XXX: check for failure? */
-- apr_bucket_read(ec, &str, &len, APR_BLOCK_READ);
-- apr_bucket_copy(ec, &foo);
-- }
-- APR_BRIGADE_INSERT_TAIL(bsend, foo);
-- ec = APR_BUCKET_NEXT(ec);
-- } while (ec != e2);
-+ APR_BRIGADE_CONCAT(bsend, tmpbb);
-+ if (i && !(i & 0x1F)) {
-+ /*
-+ * Every now and then, pass what we have down the filter chain.
-+ * In this case, the content-length filter cannot calculate and
-+ * set the content length and we must remove any Content-Length
-+ * header already present.
-+ */
-+ apr_table_unset(r->headers_out, "Content-Length");
-+ if ((rv = ap_pass_brigade(f->next, bsend)) != APR_SUCCESS)
-+ return rv;
-+ apr_brigade_cleanup(bsend);
-+ }
- }
-
- if (found == 0) {
-@@ -294,11 +395,11 @@
- return ap_pass_brigade(f->next, bsend);
- }
-
-- if (ctx->num_ranges > 1) {
-+ if (num_ranges > 1) {
- char *end;
-
- /* add the final boundary */
-- end = apr_pstrcat(r->pool, CRLF "--", ctx->boundary, "--" CRLF, NULL);
-+ end = apr_pstrcat(r->pool, CRLF "--", boundary, "--" CRLF, NULL);
- ap_xlate_proto_to_ascii(end, strlen(end));
- e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bsend, e);
-@@ -309,24 +410,32 @@
-
- /* we're done with the original content - all of our data is in bsend. */
- apr_brigade_cleanup(bb);
-+ apr_brigade_destroy(tmpbb);
-
- /* send our multipart output */
- return ap_pass_brigade(f->next, bsend);
- }
-
--static int ap_set_byterange(request_rec *r)
-+static int ap_set_byterange(request_rec *r, apr_off_t clength,
-+ apr_array_header_t **indexes)
- {
- const char *range;
- const char *if_range;
- const char *match;
- const char *ct;
-- int num_ranges;
-+ char *cur;
-+ int num_ranges = 0;
-+ apr_off_t sum_lengths = 0;
-+ indexes_t *idx;
-+ int ranges = 1;
-+ const char *it;
-
- if (r->assbackwards) {
- return 0;
- }
-
-- /* Check for Range request-header (HTTP/1.1) or Request-Range for
-+ /*
-+ * Check for Range request-header (HTTP/1.1) or Request-Range for
- * backwards-compatibility with second-draft Luotonen/Franks
- * byte-ranges (e.g. Netscape Navigator 2-3).
- *
-@@ -356,7 +465,8 @@
- return 0;
- }
-
-- /* Check the If-Range header for Etag or Date.
-+ /*
-+ * Check the If-Range header for Etag or Date.
- * Note that this check will return false (as required) if either
- * of the two etags are weak.
- */
-@@ -373,17 +483,77 @@
- }
- }
-
-- if (!ap_strchr_c(range, ',')) {
-- /* a single range */
-- num_ranges = 1;
-- }
-- else {
-- /* a multiple range */
-- num_ranges = 2;
-+ range += 6;
-+ it = range;
-+ while (*it) {
-+ if (*it++ == ',') {
-+ ranges++;
-+ }
-+ }
-+ it = range;
-+ *indexes = apr_array_make(r->pool, ranges, sizeof(indexes_t));
-+ while ((cur = ap_getword(r->pool, &range, ','))) {
-+ char *dash;
-+ char *errp;
-+ apr_off_t number, start, end;
-+
-+ if (!(dash = strchr(cur, '-'))) {
-+ break;
-+ }
-+
-+ if (dash == range) {
-+ /* In the form "-5" */
-+ if (apr_strtoff(&number, dash+1, &errp, 10) || *errp) {
-+ break;
-+ }
-+ start = clength - number;
-+ end = clength - 1;
-+ }
-+ else {
-+ *dash++ = '\0';
-+ if (apr_strtoff(&number, cur, &errp, 10) || *errp) {
-+ break;
-+ }
-+ start = number;
-+ if (*dash) {
-+ if (apr_strtoff(&number, dash, &errp, 10) || *errp) {
-+ break;
-+ }
-+ end = number;
-+ }
-+ else { /* "5-" */
-+ end = clength - 1;
-+ }
-+ }
-+
-+ if (start < 0) {
-+ start = 0;
-+ }
-+ if (end >= clength) {
-+ end = clength - 1;
-+ }
-+
-+ if (start > end) {
-+ /* ignore? count? */
-+ break;
-+ }
-+
-+ idx = (indexes_t *)apr_array_push(*indexes);
-+ idx->start = start;
-+ idx->end = end;
-+ sum_lengths += end - start + 1;
-+ /* new set again */
-+ num_ranges++;
-+ }
-+
-+ if (sum_lengths >= clength) {
-+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
-+ "Sum of ranges not smaller than file, ignoring.");
-+ return 0;
- }
-
- r->status = HTTP_PARTIAL_CONTENT;
-- r->range = range + 6;
-+ r->range = it;
-
- return num_ranges;
- }
diff --git a/www/apache22/patches/patch-lock.c b/www/apache22/patches/patch-lock.c
new file mode 100644
index 00000000000..fbad547b90d
--- /dev/null
+++ b/www/apache22/patches/patch-lock.c
@@ -0,0 +1,58 @@
+$NetBSD: patch-lock.c,v 1.1.2.2 2011/09/14 18:03:18 tron Exp $
+
+Atomically create files when using DAV to stop files being deleted on error
+
+From:
+https://issues.apache.org/bugzilla/show_bug.cgi?id=39815
+
+--- modules/dav/fs/lock.c.orig 2007-11-29 21:21:10.000000000 +0100
++++ modules/dav/fs/lock.c 2009-07-10 13:42:43.000000000 +0200
+@@ -398,46 +398,48 @@
+ ** to look up lock information for this file.
+ **
+ ** (inode/dev not supported or file is lock-null):
+ ** apr_datum_t->dvalue = full path
+ **
+ ** (inode/dev supported and file exists ):
+ ** apr_datum_t->dvalue = inode, dev
+ */
+ static apr_datum_t dav_fs_build_key(apr_pool_t *p,
+ const dav_resource *resource)
+ {
+ const char *file = dav_fs_pathname(resource);
++#if 0
+ apr_datum_t key;
+ apr_finfo_t finfo;
+ apr_status_t rv;
+
+ /* ### use lstat() ?? */
+ /*
+ * XXX: What for platforms with no IDENT (dev/inode)?
+ */
+ rv = apr_stat(&finfo, file, APR_FINFO_IDENT, p);
+ if ((rv == APR_SUCCESS || rv == APR_INCOMPLETE)
+ && ((finfo.valid & APR_FINFO_IDENT) == APR_FINFO_IDENT))
+ {
+ /* ### can we use a buffer for this? */
+ key.dsize = 1 + sizeof(finfo.inode) + sizeof(finfo.device);
+ key.dptr = apr_palloc(p, key.dsize);
+ *key.dptr = DAV_TYPE_INODE;
+ memcpy(key.dptr + 1, &finfo.inode, sizeof(finfo.inode));
+ memcpy(key.dptr + 1 + sizeof(finfo.inode), &finfo.device,
+ sizeof(finfo.device));
+
+ return key;
+ }
++#endif
+
+ return dav_fs_build_fname_key(p, file);
+ }
+
+ /*
+ ** dav_fs_lock_expired: return 1 (true) if the given timeout is in the past
+ ** or present (the lock has expired), or 0 (false) if in the future
+ ** (the lock has not yet expired).
+ */
+ static int dav_fs_lock_expired(time_t expires)
+ {
+ return expires != DAV_TIMEOUT_INFINITE && time(NULL) >= expires;
diff --git a/www/apache22/patches/patch-repos.c b/www/apache22/patches/patch-repos.c
new file mode 100644
index 00000000000..d04ea591e03
--- /dev/null
+++ b/www/apache22/patches/patch-repos.c
@@ -0,0 +1,99 @@
+$NetBSD: patch-repos.c,v 1.1.2.2 2011/09/14 18:03:18 tron Exp $
+
+Atomically create files when using DAV to stop files being deleted on error
+
+From:
+https://issues.apache.org/bugzilla/show_bug.cgi?id=39815
+
+--- modules/dav/fs/repos.c.orig 2008-08-16 00:12:47.000000000 +0200
++++ modules/dav/fs/repos.c 2009-07-10 19:01:24.000000000 +0200
+@@ -191,6 +191,7 @@
+ apr_pool_t *p;
+ apr_file_t *f;
+ const char *pathname; /* we may need to remove it at close time */
++ const char *temppath;
+ };
+
+ /* returns an appropriate HTTP status code given an APR status code for a
+@@ -841,6 +842,14 @@
+ && ctx2->pathname[len1] == '/');
+ }
+
++static apr_status_t tmpfile_cleanup(void *data) {
++ dav_stream *ds = data;
++ if (ds->temppath) {
++ apr_file_remove(ds->temppath, ds->p);
++ }
++ return APR_SUCCESS;
++}
++
+ static dav_error * dav_fs_open_stream(const dav_resource *resource,
+ dav_stream_mode mode,
+ dav_stream **stream)
+@@ -849,6 +858,7 @@
+ dav_stream *ds = apr_pcalloc(p, sizeof(*ds));
+ apr_int32_t flags;
+ apr_status_t rv;
++ char* fpath;
+
+ switch (mode) {
+ default:
+@@ -865,7 +875,18 @@
+
+ ds->p = p;
+ ds->pathname = resource->info->pathname;
+- rv = apr_file_open(&ds->f, ds->pathname, flags, APR_OS_DEFAULT, ds->p);
++ ds->temppath = NULL;
++
++ if (mode == DAV_MODE_WRITE_TRUNC) {
++ fpath = apr_pstrcat(p, ds->pathname, ".tmp.XXXXXX", NULL);
++ rv = apr_file_mktemp(&ds->f, fpath, flags, ds->p);
++ ds->temppath = fpath;
++ apr_pool_cleanup_register(p, ds, tmpfile_cleanup, apr_pool_cleanup_null);
++ }
++ else {
++ rv = apr_file_open(&ds->f, ds->pathname, flags, APR_OS_DEFAULT, ds->p);
++ }
++
+ if (rv != APR_SUCCESS) {
+ return dav_new_error(p, MAP_IO2HTTP(rv), 0,
+ "An error occurred while opening a resource.");
+@@ -879,16 +900,32 @@
+
+ static dav_error * dav_fs_close_stream(dav_stream *stream, int commit)
+ {
++ apr_status_t rv;
++
+ apr_file_close(stream->f);
+
+ if (!commit) {
+- if (apr_file_remove(stream->pathname, stream->p) != APR_SUCCESS) {
+- /* ### use a better description? */
+- return dav_new_error(stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
+- "There was a problem removing (rolling "
+- "back) the resource "
+- "when it was being closed.");
++ if (stream->temppath) {
++ apr_pool_cleanup_run(stream->p, stream, tmpfile_cleanup);
++ }
++ else {
++ if (apr_file_remove(stream->pathname, stream->p) != APR_SUCCESS) {
++ /* ### use a better description? */
++ return dav_new_error(stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
++ "There was a problem removing (rolling "
++ "back) the resource "
++ "when it was being closed.");
++ }
++ }
++ }
++ else if (stream->temppath) {
++ rv = apr_file_rename(stream->temppath, stream->pathname, stream->p);
++ if (rv) {
++ return dav_new_error(stream->p, HTTP_INTERNAL_SERVER_ERROR, rv,
++ "There was a problem writing the file "
++ "atomically after writes.");
+ }
++ apr_pool_cleanup_kill(stream->p, stream, tmpfile_cleanup);
+ }
+
+ return NULL;
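For reference, the heart of the patch-repos.c change above is the
write-to-a-temporary-file-then-rename pattern: dav_fs_open_stream() creates a
"<target>.tmp.XXXXXX" sibling with apr_file_mktemp(), and dav_fs_close_stream()
only renames it over the real name on a successful commit, so an aborted PUT
can no longer clobber or delete the existing file. A minimal stand-alone
sketch of that pattern with APR might look like the following; the
write_atomically() helper, the example path and the flag choice are this
sketch's own, not part of the patch.

    /*
     * Sketch of the temp-file-plus-rename pattern used by patch-repos.c:
     * write into "<target>.tmp.XXXXXX" and rename it over the target only
     * on success, so a failed write never touches the existing file.
     */
    #include <stdio.h>
    #include <apr_general.h>
    #include <apr_pools.h>
    #include <apr_file_io.h>
    #include <apr_strings.h>

    static apr_status_t write_atomically(apr_pool_t *p, const char *target,
                                         const char *data, apr_size_t len)
    {
        apr_file_t *f;
        apr_status_t rv;
        /* apr_file_mktemp() rewrites the template in place, so it must be
         * pool-allocated, just as the patch does with apr_pstrcat(). */
        char *tmp = apr_pstrcat(p, target, ".tmp.XXXXXX", NULL);

        rv = apr_file_mktemp(&f, tmp, APR_CREATE | APR_WRITE | APR_EXCL, p);
        if (rv != APR_SUCCESS)
            return rv;

        rv = apr_file_write_full(f, data, len, NULL);
        apr_file_close(f);
        if (rv != APR_SUCCESS) {
            apr_file_remove(tmp, p);     /* roll back: the target is untouched */
            return rv;
        }

        /* The rename is the commit point; on POSIX filesystems it is atomic. */
        return apr_file_rename(tmp, target, p);
    }

    int main(void)
    {
        apr_pool_t *pool;
        apr_initialize();
        apr_pool_create(&pool, NULL);
        if (write_atomically(pool, "example.txt", "hello\n", 6) != APR_SUCCESS)
            fprintf(stderr, "write failed\n");
        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }

Built against APR, this should compile with something like
cc $(apr-1-config --includes) sketch.c $(apr-1-config --link-ld). The
companion patch-lock.c change makes dav_fs_build_key() fall back to the
filename-based lock key, presumably because the rename gives the resource a
new inode on every write, so an inode/device key would no longer match
existing DAV locks.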