author     Stefan Fritsch <sf@sfritsch.de>    2011-12-27 19:42:22 +0100
committer  Stefan Fritsch <sf@sfritsch.de>    2011-12-27 19:42:22 +0100
commit     0268977037115539ad65a26e858aa0df8d18cd13 (patch)
tree       f761b541b04d08b75e32efc6c293111c61a8b79c /modules
parent     9e615cb6aa4afcee97f8a1646e5a586261a7b81f (diff)
download   apache2-upstream/2.2.9.tar.gz

Upstream tarball 2.2.9 (tag upstream/2.2.9)
Diffstat (limited to 'modules')
-rw-r--r--   modules/aaa/config.m4                    5
-rw-r--r--   modules/aaa/mod_authn_dbd.c             38
-rw-r--r--   modules/aaa/mod_authz_host.c             5
-rw-r--r--   modules/cache/cache_cache.c             14
-rw-r--r--   modules/cache/cache_cache.h             14
-rw-r--r--   modules/cache/cache_hash.c              16
-rw-r--r--   modules/cache/cache_hash.h              16
-rw-r--r--   modules/cache/cache_storage.c            7
-rw-r--r--   modules/cache/cache_util.c               8
-rw-r--r--   modules/cache/mod_cache.c                6
-rw-r--r--   modules/cache/mod_cache.dsp             24
-rw-r--r--   modules/cache/mod_mem_cache.dsp         32
-rw-r--r--   modules/dav/main/mod_dav.c               5
-rw-r--r--   modules/filters/mod_charset_lite.c      16
-rw-r--r--   modules/filters/mod_include.c            3
-rw-r--r--   modules/filters/mod_substitute.c        22
-rw-r--r--   modules/generators/mod_cgid.c           36
-rw-r--r--   modules/http/http_filters.c            125
-rw-r--r--   modules/ldap/README.ldap                 2
-rw-r--r--   modules/ldap/config.m4                   5
-rw-r--r--   modules/ldap/util_ldap.c                20
-rw-r--r--   modules/loggers/mod_log_config.c        22
-rw-r--r--   modules/loggers/mod_log_forensic.c       4
-rw-r--r--   modules/loggers/mod_logio.c             11
-rw-r--r--   modules/mappers/mod_rewrite.c           34
-rw-r--r--   modules/mappers/mod_speling.c            6
-rw-r--r--   modules/metadata/mod_headers.c          50
-rw-r--r--   modules/metadata/mod_unique_id.c         2
-rw-r--r--   modules/proxy/ajp.h                      1
-rw-r--r--   modules/proxy/ajp_utils.c                2
-rw-r--r--   modules/proxy/mod_proxy.c              209
-rw-r--r--   modules/proxy/mod_proxy.h               22
-rw-r--r--   modules/proxy/mod_proxy_ajp.c           91
-rw-r--r--   modules/proxy/mod_proxy_balancer.c      65
-rw-r--r--   modules/proxy/mod_proxy_ftp.c            7
-rw-r--r--   modules/proxy/mod_proxy_http.c         228
-rw-r--r--   modules/proxy/proxy_util.c             285
-rw-r--r--   modules/ssl/mod_ssl.c                   11

38 files changed, 1133 insertions(+), 336 deletions(-)
diff --git a/modules/aaa/config.m4 b/modules/aaa/config.m4
index e2c057d2..3cdd0a26 100644
--- a/modules/aaa/config.m4
+++ b/modules/aaa/config.m4
@@ -34,7 +34,10 @@ APACHE_MODULE(authz_owner, 'require file-owner' authorization control, , , most)
dnl LDAP authentication module. This module has both the authn and authz
dnl modules in one, so as to share the LDAP server config directives.
-APACHE_MODULE(authnz_ldap, LDAP based authentication, , , no)
+APACHE_MODULE(authnz_ldap, LDAP based authentication, , , no, [
+ MOD_AUTHNZ_LDAP_LDADD="`$apu_config --ldap-libs`" || MOD_AUTHNZ_LDAP_LDADD=""
+ AC_SUBST(MOD_AUTHNZ_LDAP_LDADD)
+])
dnl - and just in case all of the above punt; a default handler to
dnl keep the bad guys out.
diff --git a/modules/aaa/mod_authn_dbd.c b/modules/aaa/mod_authn_dbd.c
index 3bcde864..3341171e 100644
--- a/modules/aaa/mod_authn_dbd.c
+++ b/modules/aaa/mod_authn_dbd.c
@@ -98,24 +98,29 @@ static authn_status authn_dbd_password(request_rec *r, const char *user,
ap_dbd_t *dbd = authn_dbd_acquire_fn(r);
if (dbd == NULL) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "Error looking up %s in database", user);
+ "Failed to acquire database connection to look up "
+ "user '%s'", user);
return AUTH_GENERAL_ERROR;
}
if (conf->user == NULL) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "No AuthDBDUserPWQuery has been specified.");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "No AuthDBDUserPWQuery has been specified");
return AUTH_GENERAL_ERROR;
}
statement = apr_hash_get(dbd->prepared, conf->user, APR_HASH_KEY_STRING);
if (statement == NULL) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "A prepared statement could not be found for AuthDBDUserPWQuery, key '%s'.", conf->user);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "A prepared statement could not be found for "
+ "AuthDBDUserPWQuery with the key '%s'", conf->user);
return AUTH_GENERAL_ERROR;
}
if (apr_dbd_pvselect(dbd->driver, r->pool, dbd->handle, &res, statement,
0, user, NULL) != 0) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "Error looking up %s in database", user);
+ "Query execution error looking up '%s' "
+ "in database", user);
return AUTH_GENERAL_ERROR;
}
for (rv = apr_dbd_get_row(dbd->driver, r->pool, res, &row, -1);
@@ -123,12 +128,11 @@ static authn_status authn_dbd_password(request_rec *r, const char *user,
rv = apr_dbd_get_row(dbd->driver, r->pool, res, &row, -1)) {
if (rv != 0) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
- "Error looking up %s in database", user);
+ "Error retrieving results while looking up '%s' "
+ "in database", user);
return AUTH_GENERAL_ERROR;
}
if (dbd_password == NULL) {
- dbd_password = apr_dbd_get_entry(dbd->driver, row, 0);
-
#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 3)
/* add the rest of the columns to the environment */
int i = 1;
@@ -155,6 +159,7 @@ static authn_status authn_dbd_password(request_rec *r, const char *user,
i++;
}
#endif
+ dbd_password = apr_dbd_get_entry(dbd->driver, row, 0);
}
/* we can't break out here or row won't get cleaned up */
}
@@ -185,22 +190,27 @@ static authn_status authn_dbd_realm(request_rec *r, const char *user,
ap_dbd_t *dbd = authn_dbd_acquire_fn(r);
if (dbd == NULL) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "Error looking up %s in database", user);
+ "Failed to acquire database connection to look up "
+ "user '%s:%s'", user, realm);
return AUTH_GENERAL_ERROR;
}
if (conf->realm == NULL) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "No AuthDBDUserRealmQuery has been specified.");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "No AuthDBDUserRealmQuery has been specified");
return AUTH_GENERAL_ERROR;
}
statement = apr_hash_get(dbd->prepared, conf->realm, APR_HASH_KEY_STRING);
if (statement == NULL) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "A prepared statement could not be found for AuthDBDUserRealmQuery, key '%s'.", conf->realm);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "A prepared statement could not be found for "
+ "AuthDBDUserRealmQuery with the key '%s'", conf->realm);
return AUTH_GENERAL_ERROR;
}
if (apr_dbd_pvselect(dbd->driver, r->pool, dbd->handle, &res, statement,
0, user, realm, NULL) != 0) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "Error looking up %s:%s in database", user, realm);
+ "Query execution error looking up '%s:%s' "
+ "in database", user, realm);
return AUTH_GENERAL_ERROR;
}
for (rv = apr_dbd_get_row(dbd->driver, r->pool, res, &row, -1);
@@ -208,12 +218,11 @@ static authn_status authn_dbd_realm(request_rec *r, const char *user,
rv = apr_dbd_get_row(dbd->driver, r->pool, res, &row, -1)) {
if (rv != 0) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
- "Error looking up %s in database", user);
+ "Error retrieving results while looking up '%s:%s' "
+ "in database", user, realm);
return AUTH_GENERAL_ERROR;
}
if (dbd_hash == NULL) {
- dbd_hash = apr_dbd_get_entry(dbd->driver, row, 0);
-
#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 3)
/* add the rest of the columns to the environment */
int i = 1;
@@ -240,6 +249,7 @@ static authn_status authn_dbd_realm(request_rec *r, const char *user,
i++;
}
#endif
+ dbd_hash = apr_dbd_get_entry(dbd->driver, row, 0);
}
/* we can't break out here or row won't get cleaned up */
}
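The reworded log messages above all point at the AuthDBDUserPWQuery / AuthDBDUserRealmQuery prepared statements that mod_authn_dbd expects mod_dbd to have registered. As a point of reference only, a minimal httpd.conf sketch (driver, connection parameters and query text are illustrative, not part of this diff):

  DBDriver pgsql
  DBDParams "dbname=apacheauth user=apache password=secret"
  <Directory "/var/www/private">
      AuthType Basic
      AuthName "DBD-backed area"
      AuthBasicProvider dbd
      # this query becomes the prepared statement looked up via conf->user above
      AuthDBDUserPWQuery "SELECT password FROM authn WHERE user = %s"
      Require valid-user
  </Directory>
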
diff --git a/modules/aaa/mod_authz_host.c b/modules/aaa/mod_authz_host.c
index 5b9ec338..a502951c 100644
--- a/modules/aaa/mod_authz_host.c
+++ b/modules/aaa/mod_authz_host.c
@@ -297,8 +297,9 @@ static int check_dir_access(request_rec *r)
if (ret == HTTP_FORBIDDEN
&& (ap_satisfies(r) != SATISFY_ANY || !ap_some_auth_required(r))) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "client denied by server configuration: %s",
- r->filename);
+ "client denied by server configuration: %s%s",
+ r->filename ? "" : "uri ",
+ r->filename ? r->filename : r->uri);
}
return ret;
diff --git a/modules/cache/cache_cache.c b/modules/cache/cache_cache.c
index 860800bb..4fc95d73 100644
--- a/modules/cache/cache_cache.c
+++ b/modules/cache/cache_cache.c
@@ -44,7 +44,7 @@ struct cache_cache_t {
cache_cache_free *free_entry;
};
-CACHE_DECLARE(cache_cache_t *)cache_init(int max_entries,
+cache_cache_t* cache_init(int max_entries,
apr_size_t max_size,
cache_pqueue_get_priority get_pri,
cache_pqueue_set_priority set_pri,
@@ -75,7 +75,7 @@ CACHE_DECLARE(cache_cache_t *)cache_init(int max_entries,
return tmp;
}
-CACHE_DECLARE(void) cache_free(cache_cache_t *c)
+void cache_free(cache_cache_t *c)
{
cache_pq_free(c->pq);
cache_hash_free(c->ht);
@@ -83,12 +83,12 @@ CACHE_DECLARE(void) cache_free(cache_cache_t *c)
}
-CACHE_DECLARE(void*) cache_find(cache_cache_t* c, const char *key)
+void* cache_find(cache_cache_t* c, const char *key)
{
return cache_hash_get(c->ht, key, CACHE_HASH_KEY_STRING);
}
-CACHE_DECLARE(void) cache_update(cache_cache_t* c, void *entry)
+void cache_update(cache_cache_t* c, void *entry)
{
long old_priority;
long new_priority;
@@ -99,7 +99,7 @@ CACHE_DECLARE(void) cache_update(cache_cache_t* c, void *entry)
cache_pq_change_priority(c->pq, old_priority, new_priority, entry);
}
-CACHE_DECLARE(void) cache_insert(cache_cache_t* c, void *entry)
+void cache_insert(cache_cache_t* c, void *entry)
{
void *ejected = NULL;
long priority;
@@ -132,7 +132,7 @@ CACHE_DECLARE(void) cache_insert(cache_cache_t* c, void *entry)
cache_hash_set(c->ht, c->key_entry(entry), CACHE_HASH_KEY_STRING, entry);
}
-CACHE_DECLARE(void *) cache_pop(cache_cache_t *c)
+void* cache_pop(cache_cache_t *c)
{
void *entry;
@@ -150,7 +150,7 @@ CACHE_DECLARE(void *) cache_pop(cache_cache_t *c)
return entry;
}
-CACHE_DECLARE(apr_status_t) cache_remove(cache_cache_t *c, void *entry)
+apr_status_t cache_remove(cache_cache_t *c, void *entry)
{
apr_size_t entry_size = c->size_entry(entry);
apr_status_t rc;
diff --git a/modules/cache/cache_cache.h b/modules/cache/cache_cache.h
index 042c5d50..e805cf20 100644
--- a/modules/cache/cache_cache.h
+++ b/modules/cache/cache_cache.h
@@ -57,7 +57,7 @@ typedef void cache_cache_free(void *a);
* @param key_entry callback to get the key of a entry
* @param free_entry callback to free an entry
*/
-CACHE_DECLARE(cache_cache_t *)cache_init(int max_entries,
+cache_cache_t* cache_init(int max_entries,
apr_size_t max_size,
cache_pqueue_get_priority get_pri,
cache_pqueue_set_priority set_pri,
@@ -72,37 +72,37 @@ CACHE_DECLARE(cache_cache_t *)cache_init(int max_entries,
* free up the cache
* @param c the cache
*/
-CACHE_DECLARE(void) cache_free(cache_cache_t *c);
+void cache_free(cache_cache_t *c);
/**
* find a entry in the cache, incrementing the frequency if found
* @param c the cache
* @param key the key
*/
-CACHE_DECLARE(void*) cache_find(cache_cache_t* c, const char *key);
+void* cache_find(cache_cache_t* c, const char *key);
/**
* insert a entry into the cache
* @param c the cache
* @param entry the entry
*/
-CACHE_DECLARE(void) cache_update(cache_cache_t* c, void *entry);
+void cache_update(cache_cache_t* c, void *entry);
/**
* insert a entry into the cache
* @param c the cache
* @param entry the entry
*/
-CACHE_DECLARE(void) cache_insert(cache_cache_t* c, void *entry);
+void cache_insert(cache_cache_t* c, void *entry);
/**
* pop the lowest priority item off
* @param c the cache
* @returns the entry or NULL
*/
-CACHE_DECLARE(void *)cache_pop(cache_cache_t* c);
+void* cache_pop(cache_cache_t* c);
/**
* remove an item from the cache
* @param c the cache
* @param entry the actual entry (from a find)
*/
-CACHE_DECLARE(apr_status_t) cache_remove(cache_cache_t* c, void *entry);
+apr_status_t cache_remove(cache_cache_t* c, void *entry);
#ifdef __cplusplus
}
#endif
diff --git a/modules/cache/cache_hash.c b/modules/cache/cache_hash.c
index 2ac26ec8..202cf9f7 100644
--- a/modules/cache/cache_hash.c
+++ b/modules/cache/cache_hash.c
@@ -80,7 +80,7 @@ static cache_hash_entry_t **alloc_array(cache_hash_t *ht, int max)
return calloc(1, sizeof(*ht->array) * (max + 1));
}
-CACHE_DECLARE(cache_hash_t *) cache_hash_make(apr_size_t size)
+cache_hash_t* cache_hash_make(apr_size_t size)
{
cache_hash_t *ht;
ht = malloc(sizeof(cache_hash_t));
@@ -97,7 +97,7 @@ CACHE_DECLARE(cache_hash_t *) cache_hash_make(apr_size_t size)
return ht;
}
-CACHE_DECLARE(void) cache_hash_free(cache_hash_t *ht)
+void cache_hash_free(cache_hash_t *ht)
{
if (ht) {
if (ht->array) {
@@ -110,7 +110,7 @@ CACHE_DECLARE(void) cache_hash_free(cache_hash_t *ht)
* Hash iteration functions.
*/
-CACHE_DECLARE(cache_hash_index_t *) cache_hash_next(cache_hash_index_t *hi)
+cache_hash_index_t* cache_hash_next(cache_hash_index_t *hi)
{
hi->this = hi->next;
while (!hi->this) {
@@ -122,7 +122,7 @@ CACHE_DECLARE(cache_hash_index_t *) cache_hash_next(cache_hash_index_t *hi)
return hi;
}
-CACHE_DECLARE(cache_hash_index_t *) cache_hash_first(cache_hash_t *ht)
+cache_hash_index_t* cache_hash_first(cache_hash_t *ht)
{
cache_hash_index_t *hi;
@@ -134,7 +134,7 @@ CACHE_DECLARE(cache_hash_index_t *) cache_hash_first(cache_hash_t *ht)
return cache_hash_next(hi);
}
-CACHE_DECLARE(void) cache_hash_this(cache_hash_index_t *hi,
+void cache_hash_this(cache_hash_index_t *hi,
const void **key,
apr_ssize_t *klen,
void **val)
@@ -240,7 +240,7 @@ static cache_hash_entry_t **find_entry(cache_hash_t *ht,
return hep;
}
-CACHE_DECLARE(void *) cache_hash_get(cache_hash_t *ht,
+void* cache_hash_get(cache_hash_t *ht,
const void *key,
apr_ssize_t klen)
{
@@ -252,7 +252,7 @@ CACHE_DECLARE(void *) cache_hash_get(cache_hash_t *ht,
return NULL;
}
-CACHE_DECLARE(void *) cache_hash_set(cache_hash_t *ht,
+void* cache_hash_set(cache_hash_t *ht,
const void *key,
apr_ssize_t klen,
const void *val)
@@ -284,7 +284,7 @@ CACHE_DECLARE(void *) cache_hash_set(cache_hash_t *ht,
return NULL;
}
-CACHE_DECLARE(int) cache_hash_count(cache_hash_t *ht)
+int cache_hash_count(cache_hash_t *ht)
{
return ht->count;
}
diff --git a/modules/cache/cache_hash.h b/modules/cache/cache_hash.h
index 4138aca7..13a5eb4c 100644
--- a/modules/cache/cache_hash.h
+++ b/modules/cache/cache_hash.h
@@ -59,7 +59,7 @@ typedef struct cache_hash_index_t cache_hash_index_t;
* @param size
* @return The hash table just created
*/
-CACHE_DECLARE(cache_hash_t *) cache_hash_make(apr_size_t size);
+cache_hash_t* cache_hash_make(apr_size_t size);
/**
* Create a hash table.
@@ -70,7 +70,7 @@ CACHE_DECLARE(cache_hash_t *) cache_hash_make(apr_size_t size);
* not removed from the cache prior to calling cache_hash_free()
* will be unaccessable.
*/
-CACHE_DECLARE(void) cache_hash_free(cache_hash_t *ht);
+void cache_hash_free(cache_hash_t *ht);
/**
@@ -82,7 +82,7 @@ CACHE_DECLARE(void) cache_hash_free(cache_hash_t *ht);
* @remark If the value is NULL the hash entry is deleted.
* @return The value of the deleted cache entry (so the caller can clean it up).
*/
-CACHE_DECLARE(void *) cache_hash_set(cache_hash_t *ht, const void *key,
+void* cache_hash_set(cache_hash_t *ht, const void *key,
apr_ssize_t klen, const void *val);
/**
@@ -92,7 +92,7 @@ CACHE_DECLARE(void *) cache_hash_set(cache_hash_t *ht, const void *key,
* @param klen Length of the key. Can be CACHE_HASH_KEY_STRING to use the string length.
* @return Returns NULL if the key is not present.
*/
-CACHE_DECLARE(void *) cache_hash_get(cache_hash_t *ht, const void *key,
+void* cache_hash_get(cache_hash_t *ht, const void *key,
apr_ssize_t klen);
/**
@@ -121,7 +121,7 @@ CACHE_DECLARE(void *) cache_hash_get(cache_hash_t *ht, const void *key,
* progress at the same time.
* </PRE>
*/
-CACHE_DECLARE(cache_hash_index_t *) cache_hash_first(cache_hash_t *ht);
+cache_hash_index_t* cache_hash_first(cache_hash_t *ht);
/**
* Continue iterating over the entries in a hash table.
@@ -129,7 +129,7 @@ CACHE_DECLARE(cache_hash_index_t *) cache_hash_first(cache_hash_t *ht);
* @return a pointer to the updated iteration state. NULL if there are no more
* entries.
*/
-CACHE_DECLARE(cache_hash_index_t *) cache_hash_next(cache_hash_index_t *hi);
+cache_hash_index_t* cache_hash_next(cache_hash_index_t *hi);
/**
* Get the current entry's details from the iteration state.
@@ -140,7 +140,7 @@ CACHE_DECLARE(cache_hash_index_t *) cache_hash_next(cache_hash_index_t *hi);
* @remark The return pointers should point to a variable that will be set to the
* corresponding data, or they may be NULL if the data isn't interesting.
*/
-CACHE_DECLARE(void) cache_hash_this(cache_hash_index_t *hi, const void **key,
+void cache_hash_this(cache_hash_index_t *hi, const void **key,
apr_ssize_t *klen, void **val);
/**
@@ -148,7 +148,7 @@ CACHE_DECLARE(void) cache_hash_this(cache_hash_index_t *hi, const void **key,
* @param ht The hash table
* @return The number of key/value pairs in the hash table.
*/
-CACHE_DECLARE(int) cache_hash_count(cache_hash_t *ht);
+int cache_hash_count(cache_hash_t *ht);
/** @} */
diff --git a/modules/cache/cache_storage.c b/modules/cache/cache_storage.c
index 0ddf82dd..7b99f3ed 100644
--- a/modules/cache/cache_storage.c
+++ b/modules/cache/cache_storage.c
@@ -286,6 +286,13 @@ int cache_select(request_rec *r)
apr_table_unset(r->headers_in, "If-Range");
apr_table_unset(r->headers_in, "If-Unmodified-Since");
+ /*
+ * Do not do Range requests with our own conditionals: If
+ * we get 304 the Range does not matter and otherwise the
+ * entity changed and we want to have the complete entity
+ */
+ apr_table_unset(r->headers_in, "Range");
+
etag = apr_table_get(h->resp_hdrs, "ETag");
lastmod = apr_table_get(h->resp_hdrs, "Last-Modified");
diff --git a/modules/cache/cache_util.c b/modules/cache/cache_util.c
index 75d35bd6..78770ffd 100644
--- a/modules/cache/cache_util.c
+++ b/modules/cache/cache_util.c
@@ -235,6 +235,14 @@ CACHE_DECLARE(int) ap_cache_check_freshness(cache_handle_t *h,
cc_cresp = apr_table_get(h->resp_hdrs, "Cache-Control");
expstr = apr_table_get(h->resp_hdrs, "Expires");
+ if (ap_cache_liststr(NULL, cc_cresp, "no-cache", NULL)) {
+ /*
+ * The cached entity contained Cache-Control: no-cache, so treat as
+ * stale causing revalidation
+ */
+ return 0;
+ }
+
if ((agestr = apr_table_get(h->resp_hdrs, "Age"))) {
age_c = apr_atoi64(agestr);
}
diff --git a/modules/cache/mod_cache.c b/modules/cache/mod_cache.c
index 51341179..27df70d4 100644
--- a/modules/cache/mod_cache.c
+++ b/modules/cache/mod_cache.c
@@ -613,6 +613,12 @@ static int cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
cache->provider->remove_entity(cache->stale_handle);
/* Treat the request as if it wasn't conditional. */
cache->stale_handle = NULL;
+ /*
+ * Restore the original request headers as they may be needed
+ * by further output filters like the byterange filter to make
+ * the correct decisions.
+ */
+ r->headers_in = cache->stale_headers;
}
}
diff --git a/modules/cache/mod_cache.dsp b/modules/cache/mod_cache.dsp
index ba1653bf..40a1b34e 100644
--- a/modules/cache/mod_cache.dsp
+++ b/modules/cache/mod_cache.dsp
@@ -104,18 +104,6 @@ PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).ma
# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
# Begin Source File
-SOURCE=.\cache_cache.c
-# End Source File
-# Begin Source File
-
-SOURCE=.\cache_hash.c
-# End Source File
-# Begin Source File
-
-SOURCE=.\cache_pqueue.c
-# End Source File
-# Begin Source File
-
SOURCE=.\cache_storage.c
# End Source File
# Begin Source File
@@ -132,18 +120,6 @@ SOURCE=.\mod_cache.c
# PROP Default_Filter "h;hpp;hxx;hm;inl"
# Begin Source File
-SOURCE=.\cache_cache.h
-# End Source File
-# Begin Source File
-
-SOURCE=.\cache_hash.h
-# End Source File
-# Begin Source File
-
-SOURCE=.\cache_pqueue.h
-# End Source File
-# Begin Source File
-
SOURCE=.\mod_cache.h
# End Source File
# End Group
diff --git a/modules/cache/mod_mem_cache.dsp b/modules/cache/mod_mem_cache.dsp
index 003e4efc..7b8ebbb9 100644
--- a/modules/cache/mod_mem_cache.dsp
+++ b/modules/cache/mod_mem_cache.dsp
@@ -99,14 +99,46 @@ PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).ma
# Name "mod_mem_cache - Win32 Release"
# Name "mod_mem_cache - Win32 Debug"
+# Begin Group "Header Files"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl"
+# Begin Source File
+
+SOURCE=.\cache_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_hash.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_pqueue.h
+# End Source File
# Begin Source File
SOURCE=.\mod_cache.h
# End Source File
+# End Group
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\cache_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_hash.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\cache_pqueue.c
+# End Source File
# Begin Source File
SOURCE=.\mod_mem_cache.c
# End Source File
+# End Group
# Begin Source File
SOURCE=..\..\build\win32\httpd.rc
diff --git a/modules/dav/main/mod_dav.c b/modules/dav/main/mod_dav.c
index 24699bc0..c57bffb3 100644
--- a/modules/dav/main/mod_dav.c
+++ b/modules/dav/main/mod_dav.c
@@ -2638,6 +2638,11 @@ static int dav_method_copymove(request_rec *r, int is_move)
"Destination URI had an error.");
}
+ if (dav_get_provider(lookup.rnew) == NULL) {
+ return dav_error_response(r, HTTP_METHOD_NOT_ALLOWED,
+ "DAV not enabled for Destination URI.");
+ }
+
/* Resolve destination resource */
err = dav_get_resource(lookup.rnew, 0 /* label_allowed */,
0 /* use_checked_in */, &resnew);
diff --git a/modules/filters/mod_charset_lite.c b/modules/filters/mod_charset_lite.c
index ed8ecbe5..a44ecc33 100644
--- a/modules/filters/mod_charset_lite.c
+++ b/modules/filters/mod_charset_lite.c
@@ -76,6 +76,8 @@ typedef struct charset_dir_t {
const char *charset_default; /* how to ship on wire */
/** module does ap_add_*_filter()? */
enum {IA_INIT, IA_IMPADD, IA_NOIMPADD} implicit_add;
+ /** treat all mimetypes as text? */
+ enum {FX_INIT, FX_FORCE, FX_NOFORCE} force_xlate;
} charset_dir_t;
/* charset_filter_ctx_t is created for each filter instance; because the same
@@ -138,6 +140,8 @@ static void *merge_charset_dir_conf(apr_pool_t *p, void *basev, void *overridesv
over->charset_source ? over->charset_source : base->charset_source;
a->implicit_add =
over->implicit_add != IA_INIT ? over->implicit_add : base->implicit_add;
+ a->force_xlate=
+ over->force_xlate != FX_INIT ? over->force_xlate : base->force_xlate;
return a;
}
@@ -176,6 +180,12 @@ static const char *add_charset_options(cmd_parms *cmd, void *in_dc,
else if (!strcasecmp(flag, "NoImplicitAdd")) {
dc->implicit_add = IA_NOIMPADD;
}
+ if (!strcasecmp(flag, "TranslateAllMimeTypes")) {
+ dc->force_xlate = FX_FORCE;
+ }
+ else if (!strcasecmp(flag, "NoTranslateAllMimeTypes")) {
+ dc->force_xlate = FX_NOFORCE;
+ }
else if (!strncasecmp(flag, "DebugLevel=", 11)) {
dc->debug = atoi(flag + 11);
}
@@ -803,7 +813,8 @@ static apr_status_t xlate_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
*/
strcmp(mime_type, DIR_MAGIC_TYPE) == 0 ||
#endif
- strncasecmp(mime_type, "message/", 8) == 0) {
+ strncasecmp(mime_type, "message/", 8) == 0 ||
+ dc->force_xlate == FX_FORCE) {
rv = apr_xlate_open(&ctx->xlate,
dc->charset_default, dc->charset_source, f->r->pool);
@@ -1091,7 +1102,8 @@ static const command_rec cmds[] =
add_charset_options,
NULL,
OR_FILEINFO,
- "valid options: ImplicitAdd, NoImplicitAdd, DebugLevel=n"),
+ "valid options: ImplicitAdd, NoImplicitAdd, TranslateAllMimeTypes, "
+ "NoTranslateAllMimeTypes, DebugLevel=n"),
{NULL}
};
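The new force_xlate flag is switched by the two extra CharsetOptions keywords advertised in the updated help string. A hedged usage sketch (charsets chosen purely for illustration):

  CharsetSourceEnc UTF-8
  CharsetDefault   ISO-8859-1
  # translate every response, not only text/*, message/* and the directory type
  CharsetOptions   TranslateAllMimeTypes
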
diff --git a/modules/filters/mod_include.c b/modules/filters/mod_include.c
index db8b5faa..1c683880 100644
--- a/modules/filters/mod_include.c
+++ b/modules/filters/mod_include.c
@@ -3307,6 +3307,7 @@ static apr_status_t send_parsed_content(ap_filter_t *f, apr_bucket_brigade *bb)
if (store) {
if (index) {
APR_BUCKET_REMOVE(b);
+ apr_bucket_setaside(b, r->pool);
APR_BRIGADE_INSERT_TAIL(intern->tmp_bb, b);
b = newb;
}
@@ -3359,6 +3360,7 @@ static apr_status_t send_parsed_content(ap_filter_t *f, apr_bucket_brigade *bb)
if (store) {
if (index) {
APR_BUCKET_REMOVE(b);
+ apr_bucket_setaside(b, r->pool);
APR_BRIGADE_INSERT_TAIL(intern->tmp_bb, b);
b = newb;
}
@@ -3399,6 +3401,7 @@ static apr_status_t send_parsed_content(ap_filter_t *f, apr_bucket_brigade *bb)
default: /* partial match */
newb = APR_BUCKET_NEXT(b);
APR_BUCKET_REMOVE(b);
+ apr_bucket_setaside(b, r->pool);
APR_BRIGADE_INSERT_TAIL(intern->tmp_bb, b);
b = newb;
break;
diff --git a/modules/filters/mod_substitute.c b/modules/filters/mod_substitute.c
index 592d1404..ebe860d6 100644
--- a/modules/filters/mod_substitute.c
+++ b/modules/filters/mod_substitute.c
@@ -103,6 +103,7 @@ static void do_pattmatch(ap_filter_t *f, apr_bucket *inb,
apr_pool_t *tmp_pool)
{
int i;
+ int force_quick = 0;
ap_regmatch_t regm[AP_MAX_REG_MATCH];
apr_size_t bytes;
apr_size_t len;
@@ -128,6 +129,13 @@ static void do_pattmatch(ap_filter_t *f, apr_bucket *inb,
apr_pool_create(&tpool, tmp_pool);
scratch = NULL;
fbytes = 0;
+ /*
+ * Simple optimization. If we only have one pattern, then
+ * we can safely avoid the overhead of flattening
+ */
+ if (cfg->patterns->nelts == 1) {
+ force_quick = 1;
+ }
for (i = 0; i < cfg->patterns->nelts; i++) {
for (b = APR_BRIGADE_FIRST(mybb);
b != APR_BRIGADE_SENTINEL(mybb);
@@ -147,7 +155,7 @@ static void do_pattmatch(ap_filter_t *f, apr_bucket *inb,
{
/* get offset into buff for pattern */
len = (apr_size_t) (repl - buff);
- if (script->flatten) {
+ if (script->flatten && !force_quick) {
/*
* We are flattening the buckets here, meaning
* that we don't do the fast bucket splits.
@@ -181,7 +189,7 @@ static void do_pattmatch(ap_filter_t *f, apr_bucket *inb,
bytes -= len;
buff += len;
}
- if (script->flatten && s1) {
+ if (script->flatten && s1 && !force_quick) {
/*
* we've finished looking at the bucket, so remove the
* old one and add in our new one
@@ -219,7 +227,7 @@ static void do_pattmatch(ap_filter_t *f, apr_bucket *inb,
/* first, grab the replacement string */
repl = ap_pregsub(tmp_pool, script->replacement, p,
AP_MAX_REG_MATCH, regm);
- if (script->flatten) {
+ if (script->flatten && !force_quick) {
SEDSCAT(s1, s2, tmp_pool, p, regm[0].rm_so, repl);
}
else {
@@ -236,7 +244,7 @@ static void do_pattmatch(ap_filter_t *f, apr_bucket *inb,
*/
p += regm[0].rm_eo;
}
- if (script->flatten && s1) {
+ if (script->flatten && s1 && !force_quick) {
s1 = apr_pstrcat(tmp_pool, s1, p, NULL);
tmp_b = apr_bucket_transient_create(s1, strlen(s1),
f->r->connection->bucket_alloc);
@@ -488,7 +496,7 @@ static const char *set_pattern(cmd_parms *cmd, void *cfg, const char *line)
subst_pattern_t *nscript;
int is_pattern = 0;
int ignore_case = 0;
- int flatten = 0;
+ int flatten = 1;
ap_regex_t *r = NULL;
if (apr_tolower(*line) != 's') {
@@ -525,8 +533,10 @@ static const char *set_pattern(cmd_parms *cmd, void *cfg, const char *line)
is_pattern = 1;
else if (delim == 'f')
flatten = 1;
+ else if (delim == 'q')
+ flatten = 0;
else
- return "Bad Substitute flag, only s///[inf] are supported";
+ return "Bad Substitute flag, only s///[infq] are supported";
flags++;
}
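Because flatten now defaults to 1, the new 'q' flag is how a configuration opts back into the quicker bucket-split behaviour (the single-pattern case forces it automatically). A hedged example, with the pattern and filter attachment chosen for illustration:

  AddOutputFilterByType SUBSTITUTE text/html
  # flags: i = ignore case, n = fixed string (no regex), q = don't flatten buckets
  Substitute "s/internal.example.com/www.example.com/inq"
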
diff --git a/modules/generators/mod_cgid.c b/modules/generators/mod_cgid.c
index db5c5048..edbdf7b7 100644
--- a/modules/generators/mod_cgid.c
+++ b/modules/generators/mod_cgid.c
@@ -93,6 +93,15 @@ static const char *sockname;
static pid_t parent_pid;
static ap_unix_identity_t empty_ugid = { (uid_t)-1, (gid_t)-1, -1 };
+/* The APR other-child API doesn't tell us how the daemon exited
+ * (SIGSEGV vs. exit(1)). The other-child maintenance function
+ * needs to decide whether to restart the daemon after a failure
+ * based on whether or not it exited due to a fatal startup error
+ * or something that happened at steady-state. This exit status
+ * is unlikely to collide with exit signals.
+ */
+#define DAEMON_STARTUP_ERROR 254
+
/* Read and discard the data in the brigade produced by a CGI script */
static void discard_script_output(apr_bucket_brigade *bb);
@@ -256,9 +265,15 @@ static void cgid_maint(int reason, void *data, apr_wait_t status)
stopping = 0;
}
if (!stopping) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL,
- "cgid daemon process died, restarting");
- cgid_start(root_pool, root_server, proc);
+ if (status == DAEMON_STARTUP_ERROR) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL,
+ "cgid daemon failed to initialize");
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL,
+ "cgid daemon process died, restarting");
+ cgid_start(root_pool, root_server, proc);
+ }
}
break;
case APR_OC_REASON_RESTART:
@@ -560,6 +575,7 @@ static int cgid_server(void *data)
apr_pool_t *ptrans;
server_rec *main_server = data;
apr_hash_t *script_hash = apr_hash_make(pcgi);
+ apr_status_t rv;
apr_pool_create(&ptrans, pcgi);
@@ -594,6 +610,15 @@ static int cgid_server(void *data)
return errno;
}
+ /* Not all flavors of unix use the current umask for AF_UNIX perms */
+ rv = apr_file_perms_set(sockname, APR_FPROT_UREAD|APR_FPROT_UWRITE|APR_FPROT_UEXECUTE);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, main_server,
+ "Couldn't set permissions on unix domain socket %s",
+ sockname);
+ return rv;
+ }
+
if (listen(sd, DEFAULT_CGID_LISTENBACKLOG) < 0) {
ap_log_error(APLOG_MARK, APLOG_ERR, errno, main_server,
"Couldn't listen on unix domain socket");
@@ -780,7 +805,7 @@ static int cgid_server(void *data)
}
}
}
- return -1;
+ return -1; /* should be <= 0 to distinguish from startup errors */
}
static int cgid_start(apr_pool_t *p, server_rec *main_server,
@@ -797,8 +822,7 @@ static int cgid_start(apr_pool_t *p, server_rec *main_server,
if (pcgi == NULL) {
apr_pool_create(&pcgi, p);
}
- cgid_server(main_server);
- exit(-1);
+ exit(cgid_server(main_server) > 0 ? DAEMON_STARTUP_ERROR : -1);
}
procnew->pid = daemon_pid;
procnew->err = procnew->in = procnew->out = NULL;
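The permission fix above acts on the AF_UNIX socket the cgid daemon listens on, whose location comes from the ScriptSock directive. Shown only as a reminder, with the path illustrative:

  # where mod_cgid creates the socket whose permissions are now forced to owner-only
  ScriptSock logs/cgisock
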
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
index 7ad07ad6..9620fc9d 100644
--- a/modules/http/http_filters.c
+++ b/modules/http/http_filters.c
@@ -39,6 +39,7 @@
#include "http_main.h"
#include "http_request.h"
#include "http_vhost.h"
+#include "http_connection.h"
#include "http_log.h" /* For errors detected in basic auth common
* support code... */
#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
@@ -323,18 +324,23 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
(ctx->state == BODY_LENGTH && ctx->remaining > 0)) &&
f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1) &&
!(f->r->eos_sent || f->r->bytes_sent)) {
- char *tmp;
+ if (!ap_is_HTTP_SUCCESS(f->r->status)) {
+ ctx->state = BODY_NONE;
+ ctx->eos_sent = 1;
+ } else {
+ char *tmp;
- tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL, " ",
- ap_get_status_line(100), CRLF CRLF, NULL);
- apr_brigade_cleanup(bb);
- e = apr_bucket_pool_create(tmp, strlen(tmp), f->r->pool,
- f->c->bucket_alloc);
- APR_BRIGADE_INSERT_HEAD(bb, e);
- e = apr_bucket_flush_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
+ tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL, " ",
+ ap_get_status_line(100), CRLF CRLF, NULL);
+ apr_brigade_cleanup(bb);
+ e = apr_bucket_pool_create(tmp, strlen(tmp), f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ e = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
- ap_pass_brigade(f->c->output_filters, bb);
+ ap_pass_brigade(f->c->output_filters, bb);
+ }
}
/* We can't read the chunk until after sending 100 if required. */
@@ -420,6 +426,10 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
(APR_STATUS_IS_EAGAIN(rv)) )) {
return APR_EAGAIN;
}
+ /* If we get an error, then leave */
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
/*
* We really don't care whats on this line. If it is RFC
* compliant it should be only \r\n. If there is more
@@ -1047,12 +1057,23 @@ AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r)
/* Now we recreate the request, and echo it back */
bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ len = strlen(r->the_request);
+ tmp = apr_pmemdup(r->pool, r->the_request, len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_putstrs(bb, NULL, NULL, tmp, CRLF_ASCII, NULL);
+ }
+#else
apr_brigade_putstrs(bb, NULL, NULL, r->the_request, CRLF, NULL);
+#endif
h.pool = r->pool;
h.bb = bb;
apr_table_do((int (*) (void *, const char *, const char *))
form_header_field, (void *) &h, r->headers_in, NULL);
- apr_brigade_puts(bb, NULL, NULL, CRLF);
+ apr_brigade_puts(bb, NULL, NULL, CRLF_ASCII);
/* If configured to accept a body, echo the body */
if (bodylen) {
@@ -1082,6 +1103,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
header_struct h;
header_filter_ctx *ctx = f->ctx;
const char *ctype;
+ ap_bucket_error *eb = NULL;
AP_DEBUG_ASSERT(!r->main);
@@ -1099,13 +1121,23 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
- if (AP_BUCKET_IS_ERROR(e)) {
- ap_bucket_error *eb = e->data;
-
- ap_die(eb->status, r);
- return AP_FILTER_ERROR;
+ if (AP_BUCKET_IS_ERROR(e) && !eb) {
+ eb = e->data;
+ continue;
+ }
+ /*
+ * If we see an EOC bucket it is a signal that we should get out
+ * of the way doing nothing.
+ */
+ if (AP_BUCKET_IS_EOC(e)) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
}
}
+ if (eb) {
+ ap_die(eb->status, r);
+ return AP_FILTER_ERROR;
+ }
if (r->assbackwards) {
r->sent_bodyct = 1;
@@ -1169,10 +1201,22 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
if (!apr_is_empty_array(r->content_languages)) {
int i;
+ char *token;
char **languages = (char **)(r->content_languages->elts);
- for (i = 0; i < r->content_languages->nelts; ++i) {
- apr_table_mergen(r->headers_out, "Content-Language", languages[i]);
+ const char *field = apr_table_get(r->headers_out, "Content-Language");
+
+ while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ if (!strcasecmp(token, languages[i]))
+ break;
+ }
+ if (i == r->content_languages->nelts) {
+ *((char **) apr_array_push(r->content_languages)) = token;
+ }
}
+
+ field = apr_array_pstrcat(r->pool, r->content_languages, ',');
+ apr_table_setn(r->headers_out, "Content-Language", field);
}
/*
@@ -1524,13 +1568,24 @@ AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer,
return bufsiz;
}
+/* Context struct for ap_http_outerror_filter */
+typedef struct {
+ int seen_eoc;
+} outerror_filter_ctx_t;
+
/* Filter to handle any error buckets on output */
apr_status_t ap_http_outerror_filter(ap_filter_t *f,
apr_bucket_brigade *b)
{
request_rec *r = f->r;
+ outerror_filter_ctx_t *ctx = (outerror_filter_ctx_t *)(f->ctx);
apr_bucket *e;
+ /* Create context if none is present */
+ if (!ctx) {
+ ctx = apr_pcalloc(r->pool, sizeof(outerror_filter_ctx_t));
+ f->ctx = ctx;
+ }
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
@@ -1544,6 +1599,40 @@ apr_status_t ap_http_outerror_filter(ap_filter_t *f,
/* stream aborted and we have not ended it yet */
r->connection->keepalive = AP_CONN_CLOSE;
}
+ continue;
+ }
+ /* Detect EOC buckets and memorize this in the context. */
+ if (AP_BUCKET_IS_EOC(e)) {
+ ctx->seen_eoc = 1;
+ }
+ }
+ /*
+ * Remove all data buckets that are in a brigade after an EOC bucket
+ * was seen, as an EOC bucket tells us that no (further) resource
+ * and protocol data should go out to the client. OTOH meta buckets
+ * are still welcome as they might trigger needed actions down in
+ * the chain (e.g. in network filters like SSL).
+ * Remark 1: It is needed to dump ALL data buckets in the brigade
+ * since an filter in between might have inserted data
+ * buckets BEFORE the EOC bucket sent by the original
+ * sender and we do NOT want this data to be sent.
+ * Remark 2: Dumping all data buckets here does not necessarily mean
+ * that no further data is send to the client as:
+ * 1. Network filters like SSL can still be triggered via
+ * meta buckets to talk with the client e.g. for a
+ * clean shutdown.
+ * 2. There could be still data that was buffered before
+ * down in the chain that gets flushed by a FLUSH or an
+ * EOS bucket.
+ */
+ if (ctx->seen_eoc) {
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (!APR_BUCKET_IS_METADATA(e)) {
+ APR_BUCKET_REMOVE(e);
+ }
}
}
diff --git a/modules/ldap/README.ldap b/modules/ldap/README.ldap
index 30ec5cc7..116707e0 100644
--- a/modules/ldap/README.ldap
+++ b/modules/ldap/README.ldap
@@ -38,7 +38,7 @@ Quick installation instructions (win32):
3. Compile the two modules util_ldap and mod_authnz_ldap using the dsp files
4. You get a mod_authnz_ldap.so and a mod_ldap.so module
5. Put them in the modules directory, don't forget to copy the
- nsldap32v50.dll somewhere where apache.exe will find it
+ nsldap32v50.dll somewhere where httpd.exe will find it
6. Load the two modules in your httpd.conf, like below:
LoadModule ldap_module modules/mod_ldap.so
LoadModule authnz_ldap_module modules/mod_authnz_ldap.so
diff --git a/modules/ldap/config.m4 b/modules/ldap/config.m4
index 25a92af4..a598d250 100644
--- a/modules/ldap/config.m4
+++ b/modules/ldap/config.m4
@@ -4,6 +4,9 @@ dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]])
APACHE_MODPATH_INIT(ldap)
ldap_objects="util_ldap.lo util_ldap_cache.lo util_ldap_cache_mgr.lo"
-APACHE_MODULE(ldap, LDAP caching and connection pooling services, $ldap_objects, , no)
+APACHE_MODULE(ldap, LDAP caching and connection pooling services, $ldap_objects, , no, [
+ MOD_LDAP_LDADD="`$apu_config --ldap-libs`" || MOD_LDAP_LDADD=""
+ AC_SUBST(MOD_LDAP_LDADD)
+])
APACHE_MODPATH_FINISH
diff --git a/modules/ldap/util_ldap.c b/modules/ldap/util_ldap.c
index 2f651931..5ea50d0a 100644
--- a/modules/ldap/util_ldap.c
+++ b/modules/ldap/util_ldap.c
@@ -212,7 +212,9 @@ static int uldap_connection_init(request_rec *r,
int rc = 0, ldap_option = 0;
int version = LDAP_VERSION3;
apr_ldap_err_t *result = NULL;
+#ifdef LDAP_OPT_NETWORK_TIMEOUT
struct timeval timeOut = {10,0}; /* 10 second connection timeout */
+#endif
util_ldap_state_t *st =
(util_ldap_state_t *)ap_get_module_config(r->server->module_config,
&ldap_module);
@@ -923,12 +925,10 @@ static int uldap_cache_checkuserid(request_rec *r, util_ldap_connection_t *ldc,
/* ...and entry is valid */
*binddn = apr_pstrdup(r->pool, search_nodep->dn);
if (attrs) {
- int i = 0, k = 0;
- while (attrs[k++]);
- *retvals = apr_pcalloc(r->pool, sizeof(char *) * k);
- while (search_nodep->vals[i]) {
+ int i;
+ *retvals = apr_pcalloc(r->pool, sizeof(char *) * search_nodep->numvals);
+ for (i = 0; i < search_nodep->numvals; i++) {
(*retvals)[i] = apr_pstrdup(r->pool, search_nodep->vals[i]);
- i++;
}
}
LDAP_CACHE_UNLOCK();
@@ -1172,12 +1172,10 @@ static int uldap_cache_getuserdn(request_rec *r, util_ldap_connection_t *ldc,
/* ...and entry is valid */
*binddn = apr_pstrdup(r->pool, search_nodep->dn);
if (attrs) {
- int i = 0, k = 0;
- while (attrs[k++]);
- *retvals = apr_pcalloc(r->pool, sizeof(char *) * k);
- while (search_nodep->vals[i]) {
+ int i;
+ *retvals = apr_pcalloc(r->pool, sizeof(char *) * search_nodep->numvals);
+ for (i = 0; i < search_nodep->numvals; i++) {
(*retvals)[i] = apr_pstrdup(r->pool, search_nodep->vals[i]);
- i++;
}
}
LDAP_CACHE_UNLOCK();
@@ -1773,9 +1771,11 @@ static const char *util_ldap_set_connection_timeout(cmd_parms *cmd,
void *dummy,
const char *ttl)
{
+#ifdef LDAP_OPT_NETWORK_TIMEOUT
util_ldap_state_t *st =
(util_ldap_state_t *)ap_get_module_config(cmd->server->module_config,
&ldap_module);
+#endif
const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
if (err != NULL) {
diff --git a/modules/loggers/mod_log_config.c b/modules/loggers/mod_log_config.c
index 72bf5018..cd468be1 100644
--- a/modules/loggers/mod_log_config.c
+++ b/modules/loggers/mod_log_config.c
@@ -90,7 +90,9 @@
* %...l: remote logname (from identd, if supplied)
* %...{Foobar}n: The contents of note "Foobar" from another module.
* %...{Foobar}o: The contents of Foobar: header line(s) in the reply.
- * %...p: the port the request was served to
+ * %...p: the canonical port for the server
+ * %...{format}p: the canonical port for the server, or the actual local
+ * or remote port
* %...P: the process ID of the child that serviced the request.
* %...{format}P: the process ID or thread ID of the child/thread that
* serviced the request
@@ -633,8 +635,22 @@ static const char *log_virtual_host(request_rec *r, char *a)
static const char *log_server_port(request_rec *r, char *a)
{
- return apr_psprintf(r->pool, "%u",
- r->server->port ? r->server->port : ap_default_port(r));
+ apr_port_t port;
+
+ if (*a == '\0' || !strcasecmp(a, "canonical")) {
+ port = r->server->port ? r->server->port : ap_default_port(r);
+ }
+ else if (!strcasecmp(a, "remote")) {
+ port = r->connection->remote_addr->port;
+ }
+ else if (!strcasecmp(a, "local")) {
+ port = r->connection->local_addr->port;
+ }
+ else {
+ /* bogus format */
+ return a;
+ }
+ return apr_itoa(r->pool, (int)port);
}
/* This respects the setting of UseCanonicalName so that
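With log_server_port() now taking an argument, %p can carry a format. A hedged LogFormat sketch using the three names documented in the comment block above (log name and path are illustrative):

  # canonical server port, actual local port, and the client's remote port
  LogFormat "%h %l %u %t \"%r\" %>s %b %{canonical}p %{local}p %{remote}p" portlog
  CustomLog logs/port_log portlog
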
diff --git a/modules/loggers/mod_log_forensic.c b/modules/loggers/mod_log_forensic.c
index 3fad35a7..f44f0b46 100644
--- a/modules/loggers/mod_log_forensic.c
+++ b/modules/loggers/mod_log_forensic.c
@@ -195,8 +195,8 @@ static int log_before(request_rec *r)
if (!(id = apr_table_get(r->subprocess_env, "UNIQUE_ID"))) {
/* we make the assumption that we can't go through all the PIDs in
under 1 second */
- id = apr_psprintf(r->pool, "%x:%lx:%x", getpid(), time(NULL),
- apr_atomic_inc32(&next_id));
+ id = apr_psprintf(r->pool, "%" APR_PID_T_FMT ":%lx:%x", getpid(),
+ time(NULL), apr_atomic_inc32(&next_id));
}
ap_set_module_config(r->request_config, &log_forensic_module, (char *)id);
diff --git a/modules/loggers/mod_logio.c b/modules/loggers/mod_logio.c
index 91db4f65..bc4d416c 100644
--- a/modules/loggers/mod_logio.c
+++ b/modules/loggers/mod_logio.c
@@ -66,6 +66,16 @@ static void ap_logio_add_bytes_out(conn_rec *c, apr_off_t bytes){
}
/*
+ * Optional function for modules to adjust bytes_in
+ */
+
+static void ap_logio_add_bytes_in(conn_rec *c, apr_off_t bytes){
+ logio_config_t *cf = ap_get_module_config(c->conn_config, &logio_module);
+
+ cf->bytes_in += bytes;
+}
+
+/*
* Format items...
*/
@@ -178,6 +188,7 @@ static void register_hooks(apr_pool_t *p)
AP_FTYPE_NETWORK - 1);
APR_REGISTER_OPTIONAL_FN(ap_logio_add_bytes_out);
+ APR_REGISTER_OPTIONAL_FN(ap_logio_add_bytes_in);
}
module AP_MODULE_DECLARE_DATA logio_module =
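mod_logio feeds the %I (bytes in) and %O (bytes out) format items of mod_log_config; the newly registered ap_logio_add_bytes_in optional function gives other modules a hook to adjust the input counter the same way outgoing bytes are already adjusted. For context, the usual logging setup (log name and path are illustrative):

  LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
  CustomLog logs/access_log combinedio
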
diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c
index e4d0e5d8..72860d95 100644
--- a/modules/mappers/mod_rewrite.c
+++ b/modules/mappers/mod_rewrite.c
@@ -227,6 +227,8 @@ typedef struct {
char *(*func)(request_rec *, /* function pointer for internal maps */
char *);
char **argv; /* argv of the external rewrite map */
+ const char *checkfile2; /* filename to check for map existence
+ NULL if only one file */
} rewritemap_entry;
/* special pattern types for RewriteCond */
@@ -1551,6 +1553,21 @@ static char *lookup_map(request_rec *r, char *name, char *key)
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
"mod_rewrite: can't access DBM RewriteMap file %s",
s->checkfile);
+ }
+ else if(s->checkfile2 != NULL) {
+ apr_finfo_t st2;
+
+ rv = apr_stat(&st2, s->checkfile2, APR_FINFO_MIN, r->pool);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "mod_rewrite: can't access DBM RewriteMap "
+ "file %s", s->checkfile2);
+ }
+ else if(st2.mtime > st.mtime) {
+ st.mtime = st2.mtime;
+ }
+ }
+ if(rv != APR_SUCCESS) {
rewritelog((r, 1, NULL,
"can't open DBM RewriteMap file, see error log"));
return NULL;
@@ -2822,6 +2839,7 @@ static const char *cmd_rewritemap(cmd_parms *cmd, void *dconf, const char *a1,
newmap->type = MAPTYPE_TXT;
newmap->datafile = fname;
newmap->checkfile = fname;
+ newmap->checkfile2= NULL;
newmap->cachename = apr_psprintf(cmd->pool, "%pp:%s",
(void *)cmd->server, a1);
}
@@ -2834,11 +2852,11 @@ static const char *cmd_rewritemap(cmd_parms *cmd, void *dconf, const char *a1,
newmap->type = MAPTYPE_RND;
newmap->datafile = fname;
newmap->checkfile = fname;
+ newmap->checkfile2= NULL;
newmap->cachename = apr_psprintf(cmd->pool, "%pp:%s",
(void *)cmd->server, a1);
}
else if (strncasecmp(a2, "dbm", 3) == 0) {
- const char *ignored_fname;
apr_status_t rv;
newmap->type = MAPTYPE_DBM;
@@ -2873,7 +2891,7 @@ static const char *cmd_rewritemap(cmd_parms *cmd, void *dconf, const char *a1,
rv = apr_dbm_get_usednames_ex(cmd->pool, newmap->dbmtype,
newmap->datafile, &newmap->checkfile,
- &ignored_fname);
+ &newmap->checkfile2);
if (rv != APR_SUCCESS) {
return apr_pstrcat(cmd->pool, "RewriteMap: dbm type ",
newmap->dbmtype, " is invalid", NULL);
@@ -2892,12 +2910,14 @@ static const char *cmd_rewritemap(cmd_parms *cmd, void *dconf, const char *a1,
newmap->type = MAPTYPE_PRG;
newmap->datafile = NULL;
newmap->checkfile = newmap->argv[0];
+ newmap->checkfile2= NULL;
newmap->cachename = NULL;
}
else if (strncasecmp(a2, "int:", 4) == 0) {
newmap->type = MAPTYPE_INT;
newmap->datafile = NULL;
newmap->checkfile = NULL;
+ newmap->checkfile2= NULL;
newmap->cachename = NULL;
newmap->func = (char *(*)(request_rec *,char *))
apr_hash_get(mapfunc_hash, a2+4, strlen(a2+4));
@@ -2915,6 +2935,7 @@ static const char *cmd_rewritemap(cmd_parms *cmd, void *dconf, const char *a1,
newmap->type = MAPTYPE_TXT;
newmap->datafile = fname;
newmap->checkfile = fname;
+ newmap->checkfile2= NULL;
newmap->cachename = apr_psprintf(cmd->pool, "%pp:%s",
(void *)cmd->server, a1);
}
@@ -4034,7 +4055,6 @@ static int pre_config(apr_pool_t *pconf,
APR_OPTIONAL_FN_TYPE(ap_register_rewrite_mapfunc) *map_pfn_register;
/* register int: rewritemap handlers */
- mapfunc_hash = apr_hash_make(pconf);
map_pfn_register = APR_RETRIEVE_OPTIONAL_FN(ap_register_rewrite_mapfunc);
if (map_pfn_register) {
map_pfn_register("tolower", rewrite_mapfunc_tolower);
@@ -4287,6 +4307,10 @@ static int hook_uri2file(request_rec *r)
return HTTP_FORBIDDEN;
}
+ if (rulestatus == ACTION_NOESCAPE) {
+ apr_table_setn(r->notes, "proxy-nocanon", "1");
+ }
+
/* make sure the QUERY_STRING and
* PATH_INFO parts get incorporated
*/
@@ -4829,6 +4853,10 @@ static void register_hooks(apr_pool_t *p)
*/
static const char * const aszPre[]={ "mod_proxy.c", NULL };
+ /* make the hashtable before registering the function, so that
+ * other modules are prevented from accessing uninitialized memory.
+ */
+ mapfunc_hash = apr_hash_make(p);
APR_REGISTER_OPTIONAL_FN(ap_register_rewrite_mapfunc);
ap_hook_handler(handler_redirect, NULL, NULL, APR_HOOK_MIDDLE);
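checkfile2 exists because some DBM backends keep a map in two files (for example a .pag/.dir pair), and the newer of the two mtimes now drives cache invalidation; the ACTION_NOESCAPE hunk additionally passes the [NE] intent on to mod_proxy through the proxy-nocanon note. A hedged configuration sketch (map path and rules are illustrative):

  RewriteEngine On
  # a dbm map may be stored as two files; both are now stat()ed for freshness
  RewriteMap hostmap dbm:/usr/local/apache2/conf/hostmap
  RewriteRule ^/go/(.+)$ ${hostmap:$1|/notfound} [L]
  # with [NE] the rewritten URL is handed to mod_proxy uncanonicalised
  RewriteRule ^/raw/(.*)$ http://backend.example.com/$1 [P,NE]
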
diff --git a/modules/mappers/mod_speling.c b/modules/mappers/mod_speling.c
index 270b47ee..78f58b07 100644
--- a/modules/mappers/mod_speling.c
+++ b/modules/mappers/mod_speling.c
@@ -225,12 +225,6 @@ static int check_speling(request_rec *r)
return DECLINED;
}
- /* we default to reject path info (same as core handler) */
- if ((r->used_path_info != AP_REQ_ACCEPT_PATH_INFO) &&
- r->path_info && *r->path_info) {
- return DECLINED;
- }
-
/*
* The request should end up looking like this:
* r->uri: /correct-url/mispelling/more
diff --git a/modules/metadata/mod_headers.c b/modules/metadata/mod_headers.c
index e139b5db..d35c44f1 100644
--- a/modules/metadata/mod_headers.c
+++ b/modules/metadata/mod_headers.c
@@ -33,7 +33,10 @@
* add - add this header, possible resulting in two or more
* headers with the same name
* append - append this text onto any existing header of this same
+ * merge - merge this text onto any existing header of this same,
+ * avoiding duplicate values
* unset - remove this header
+ * edit - transform the header value according to a regexp
*
* Where action is unset, the third argument (value) should not be given.
* The header name can include the colon, or not.
@@ -88,6 +91,7 @@ typedef enum {
hdr_add = 'a', /* add header (could mean multiple hdrs) */
hdr_set = 's', /* set (replace old value) */
hdr_append = 'm', /* append (merge into any old value) */
+ hdr_merge = 'g', /* merge (merge, but avoid duplicates) */
hdr_unset = 'u', /* unset header */
hdr_echo = 'e', /* echo headers from request to response */
hdr_edit = 'r' /* change value by regexp */
@@ -394,6 +398,8 @@ static APR_INLINE const char *header_inout_cmd(cmd_parms *cmd,
new->action = hdr_add;
else if (!strcasecmp(action, "append"))
new->action = hdr_append;
+ else if (!strcasecmp(action, "merge"))
+ new->action = hdr_merge;
else if (!strcasecmp(action, "unset"))
new->action = hdr_unset;
else if (!strcasecmp(action, "echo"))
@@ -401,8 +407,8 @@ static APR_INLINE const char *header_inout_cmd(cmd_parms *cmd,
else if (!strcasecmp(action, "edit"))
new->action = hdr_edit;
else
- return "first argument must be 'add', 'set', 'append', 'unset', "
- "'echo' or 'edit'.";
+ return "first argument must be 'add', 'set', 'append', 'merge', "
+ "'unset', 'echo', or 'edit'.";
if (new->action == hdr_edit) {
if (subs == NULL) {
@@ -610,6 +616,46 @@ static void do_headers_fixup(request_rec *r, apr_table_t *headers,
case hdr_append:
apr_table_mergen(headers, hdr->header, process_tags(hdr, r));
break;
+ case hdr_merge:
+ val = apr_table_get(headers, hdr->header);
+ if (val == NULL) {
+ apr_table_addn(headers, hdr->header, process_tags(hdr, r));
+ } else {
+ char *new_val = process_tags(hdr, r);
+ apr_size_t new_val_len = strlen(new_val);
+ int tok_found = 0;
+
+ /* modified version of logic in ap_get_token() */
+ while (*val) {
+ const char *tok_start;
+
+ while (*val && apr_isspace(*val))
+ ++val;
+
+ tok_start = val;
+
+ while (*val && *val != ',') {
+ if (*val++ == '"')
+ while (*val)
+ if (*val++ == '"')
+ break;
+ }
+
+ if (new_val_len == (apr_size_t)(val - tok_start)
+ && !strncmp(tok_start, new_val, new_val_len)) {
+ tok_found = 1;
+ break;
+ }
+
+ if (*val)
+ ++val;
+ }
+
+ if (!tok_found) {
+ apr_table_mergen(headers, hdr->header, new_val);
+ }
+ }
+ break;
case hdr_set:
apr_table_setn(headers, hdr->header, process_tags(hdr, r));
break;
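The merge action behaves like append, except that the quoted-string aware token walk above first checks whether the value is already present and skips the addition if so, keeping headers such as Cache-Control free of duplicate tokens. A hedged example:

  # append could yield "Cache-Control: no-store, no-store" on repeated passes;
  # merge adds the token only when it is not already in the header value
  Header merge Cache-Control no-store
  Header merge Vary Accept-Encoding
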
diff --git a/modules/metadata/mod_unique_id.c b/modules/metadata/mod_unique_id.c
index 27e18d53..a78e0206 100644
--- a/modules/metadata/mod_unique_id.c
+++ b/modules/metadata/mod_unique_id.c
@@ -304,7 +304,7 @@ static int gen_unique_id(request_rec *r)
new_unique_id.pid = cur_unique_id.pid;
new_unique_id.counter = cur_unique_id.counter;
- new_unique_id.stamp = htonl((unsigned int)r->request_time);
+ new_unique_id.stamp = htonl((unsigned int)apr_time_sec(r->request_time));
new_unique_id.thread_index = htonl((unsigned int)r->connection->id);
/* we'll use a temporal buffer to avoid uuencoding the possible internal
diff --git a/modules/proxy/ajp.h b/modules/proxy/ajp.h
index 8c022fb3..8327e8d4 100644
--- a/modules/proxy/ajp.h
+++ b/modules/proxy/ajp.h
@@ -147,6 +147,7 @@ struct ajp_msg
#define AJP_MSG_BUFFER_SZ 8192
#define AJP_MAX_BUFFER_SZ 65536
#define AJP13_MAX_SEND_BODY_SZ (AJP_MAX_BUFFER_SZ - AJP_HEADER_SZ)
+#define AJP_PING_PONG_SZ 128
/** Send a request from web server to container*/
#define CMD_AJP13_FORWARD_REQUEST (unsigned char)2
diff --git a/modules/proxy/ajp_utils.c b/modules/proxy/ajp_utils.c
index 5a0e8772..780aeb48 100644
--- a/modules/proxy/ajp_utils.c
+++ b/modules/proxy/ajp_utils.c
@@ -31,7 +31,7 @@ apr_status_t ajp_handle_cping_cpong(apr_socket_t *sock,
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"Into ajp_handle_cping_cpong");
- rc = ajp_msg_create(r->pool, AJP_HEADER_SZ_LEN+1, &msg);
+ rc = ajp_msg_create(r->pool, AJP_PING_PONG_SZ, &msg);
if (rc != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
"ajp_handle_cping_cpong: ajp_msg_create failed");
diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index de48638b..00bcfeba 100644
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -168,6 +168,15 @@ static const char *set_worker_param(apr_pool_t *p,
return "KeepAlive must be On|Off";
worker->keepalive_set = 1;
}
+ else if (!strcasecmp(key, "disablereuse")) {
+ if (!strcasecmp(val, "on"))
+ worker->disablereuse = 1;
+ else if (!strcasecmp(val, "off"))
+ worker->disablereuse = 0;
+ else
+ return "DisableReuse must be On|Off";
+ worker->disablereuse_set = 1;
+ }
else if (!strcasecmp(key, "route")) {
/* Worker route.
*/
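disablereuse joins the key=value worker parameters handled by set_worker_param(); when on, the proxy closes the backend connection after each request instead of returning it to the pool. A hedged ProxyPass sketch (backend URL is illustrative):

  # helpful when an intervening firewall silently drops idle backend connections
  ProxyPass /app http://backend.example.com:8080/app disablereuse=On
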
@@ -432,6 +441,59 @@ static int proxy_detect(request_rec *r)
return DECLINED;
}
+static const char *proxy_interpolate(request_rec *r, const char *str)
+{
+ /* Interpolate an env str in a configuration string
+ * Syntax ${var} --> value_of(var)
+ * Method: replace one var, and recurse on remainder of string
+ * Nothing clever here, and crap like nested vars may do silly things
+ * but we'll at least avoid sending the unwary into a loop
+ */
+ const char *start;
+ const char *end;
+ const char *var;
+ const char *val;
+ const char *firstpart;
+
+ start = ap_strstr_c(str, "${");
+ if (start == NULL) {
+ return str;
+ }
+ end = ap_strchr_c(start+2, '}');
+ if (end == NULL) {
+ return str;
+ }
+ /* OK, this is syntax we want to interpolate. Is there such a var ? */
+ var = apr_pstrndup(r->pool, start+2, end-(start+2));
+ val = apr_table_get(r->subprocess_env, var);
+ firstpart = apr_pstrndup(r->pool, str, (start-str));
+
+ if (val == NULL) {
+ return apr_pstrcat(r->pool, firstpart,
+ proxy_interpolate(r, end+1), NULL);
+ }
+ else {
+ return apr_pstrcat(r->pool, firstpart, val,
+ proxy_interpolate(r, end+1), NULL);
+ }
+}
+static apr_array_header_t *proxy_vars(request_rec *r,
+ apr_array_header_t *hdr)
+{
+ int i;
+ apr_array_header_t *ret = apr_array_make(r->pool, hdr->nelts,
+ sizeof (struct proxy_alias));
+ struct proxy_alias *old = (struct proxy_alias *) hdr->elts;
+
+ for (i = 0; i < hdr->nelts; ++i) {
+ struct proxy_alias *newcopy = apr_array_push(ret);
+ newcopy->fake = (old[i].flags & PROXYPASS_INTERPOLATE)
+ ? proxy_interpolate(r, old[i].fake) : old[i].fake;
+ newcopy->real = (old[i].flags & PROXYPASS_INTERPOLATE)
+ ? proxy_interpolate(r, old[i].real) : old[i].real;
+ }
+ return ret;
+}
static int proxy_trans(request_rec *r)
{
void *sconf = r->server->module_config;
@@ -439,6 +501,10 @@ static int proxy_trans(request_rec *r)
(proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
int i, len;
struct proxy_alias *ent = (struct proxy_alias *) conf->aliases->elts;
+ proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
+ &proxy_module);
+ const char *fake;
+ const char *real;
ap_regmatch_t regm[AP_MAX_REG_MATCH];
ap_regmatch_t reg1[AP_MAX_REG_MATCH];
char *found = NULL;
@@ -459,9 +525,18 @@ static int proxy_trans(request_rec *r)
for (i = 0; i < conf->aliases->nelts; i++) {
unsigned int nocanon = ent[i].flags & PROXYPASS_NOCANON;
const char *use_uri = nocanon ? r->unparsed_uri : r->uri;
+ if ((dconf->interpolate_env == 1)
+ && (ent[i].flags & PROXYPASS_INTERPOLATE)) {
+ fake = proxy_interpolate(r, ent[i].fake);
+ real = proxy_interpolate(r, ent[i].real);
+ }
+ else {
+ fake = ent[i].fake;
+ real = ent[i].real;
+ }
if (ent[i].regex) {
if (!ap_regexec(ent[i].regex, r->uri, AP_MAX_REG_MATCH, regm, 0)) {
- if ((ent[i].real[0] == '!') && (ent[i].real[1] == '\0')) {
+ if ((real[0] == '!') && (real[1] == '\0')) {
return DECLINED;
}
/* test that we haven't reduced the URI */
@@ -470,8 +545,7 @@ static int proxy_trans(request_rec *r)
mismatch = 1;
use_uri = r->uri;
}
- found = ap_pregsub(r->pool, ent[i].real, use_uri,
- AP_MAX_REG_MATCH,
+ found = ap_pregsub(r->pool, real, use_uri, AP_MAX_REG_MATCH,
(use_uri == r->uri) ? regm : reg1);
/* Note: The strcmp() below catches cases where there
* was no regex substitution. This is so cases like:
@@ -486,20 +560,20 @@ static int proxy_trans(request_rec *r)
*
* which may be confusing.
*/
- if (found && strcmp(found, ent[i].real)) {
+ if (found && strcmp(found, real)) {
found = apr_pstrcat(r->pool, "proxy:", found, NULL);
}
else {
- found = apr_pstrcat(r->pool, "proxy:", ent[i].real,
+ found = apr_pstrcat(r->pool, "proxy:", real,
use_uri, NULL);
}
}
}
else {
- len = alias_match(r->uri, ent[i].fake);
+ len = alias_match(r->uri, fake);
- if (len > 0) {
- if ((ent[i].real[0] == '!') && (ent[i].real[1] == '\0')) {
+ if (len != 0) {
+ if ((real[0] == '!') && (real[1] == '\0')) {
return DECLINED;
}
if (nocanon
@@ -507,7 +581,7 @@ static int proxy_trans(request_rec *r)
mismatch = 1;
use_uri = r->uri;
}
- found = apr_pstrcat(r->pool, "proxy:", ent[i].real,
+ found = apr_pstrcat(r->pool, "proxy:", real,
use_uri + len, NULL);
}
}
@@ -591,6 +665,7 @@ static int proxy_map_location(request_rec *r)
return OK;
}
+
/* -------------------------------------------------------------- */
/* Fixup the filename */
@@ -601,6 +676,8 @@ static int proxy_fixup(request_rec *r)
{
char *url, *p;
int access_status;
+ proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
+ &proxy_module);
if (!r->proxyreq || !r->filename || strncmp(r->filename, "proxy:", 6) != 0)
return DECLINED;
@@ -608,6 +685,17 @@ static int proxy_fixup(request_rec *r)
/* XXX: Shouldn't we try this before we run the proxy_walk? */
url = &r->filename[6];
+ if ((dconf->interpolate_env == 1) && (r->proxyreq == PROXYREQ_REVERSE)) {
+ /* create per-request copy of reverse proxy conf,
+ * and interpolate vars in it
+ */
+ proxy_req_conf *rconf = apr_palloc(r->pool, sizeof(proxy_req_conf));
+ ap_set_module_config(r->request_config, &proxy_module, rconf);
+ rconf->raliases = proxy_vars(r, dconf->raliases);
+ rconf->cookie_paths = proxy_vars(r, dconf->cookie_paths);
+ rconf->cookie_domains = proxy_vars(r, dconf->cookie_domains);
+ }
+
/* canonicalise each specific scheme */
if ((access_status = proxy_run_canon_handler(r, url))) {
return access_status;
@@ -830,12 +918,41 @@ static int proxy_handler(request_rec *r)
ents[i].hostname,
ents[i].port);
- /* an error or success */
- if (access_status != DECLINED &&
- access_status != HTTP_BAD_GATEWAY) {
- goto cleanup;
+ /* Did the scheme handler process the request? */
+ if (access_status != DECLINED) {
+ const char *cl_a;
+ char *end;
+ apr_off_t cl;
+
+ /*
+ * A fatal error or success, so there is no point in
+ * retrying with a direct connection.
+ */
+ if (access_status != HTTP_BAD_GATEWAY) {
+ goto cleanup;
+ }
+ cl_a = apr_table_get(r->headers_in, "Content-Length");
+ if (cl_a) {
+ apr_strtoff(&cl, cl_a, &end, 0);
+ /*
+ * The request body is of length > 0. We cannot
+ * retry with a direct connection since we already
+ * sent (parts of) the request body to the proxy
+ * and no longer have it.
+ */
+ if (cl > 0) {
+ goto cleanup;
+ }
+ }
+ /*
+ * Transfer-Encoding was set as input header, so we had
+ * a request body. We cannot retry with a direct
+ * connection for the same reason as above.
+ */
+ if (apr_table_get(r->headers_in, "Transfer-Encoding")) {
+ goto cleanup;
+ }
}
- /* we failed to talk to the upstream proxy */
}
}
}
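
The retry logic added above only falls back to a direct connection when no request body can already have been forwarded to the unreachable proxy. Factored out as a helper purely for illustration (the shipped code performs these checks inline), the test looks roughly like this:

    /* Sketch only: returns non-zero if (part of) a request body may already
     * have been sent upstream, in which case a direct-connection retry is
     * unsafe because the body cannot be replayed.
     */
    static int body_possibly_forwarded(request_rec *r)
    {
        const char *cl_a = apr_table_get(r->headers_in, "Content-Length");
        apr_off_t cl;
        char *end;

        if (apr_table_get(r->headers_in, "Transfer-Encoding")) {
            return 1;                      /* chunked body was streamed */
        }
        if (cl_a && apr_strtoff(&cl, cl_a, &end, 0) == APR_SUCCESS && cl > 0) {
            return 1;                      /* non-empty body announced */
        }
        return 0;                          /* no body: safe to retry directly */
    }
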
@@ -1002,6 +1119,7 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy)
new->cookie_domains = apr_array_make(p, 10, sizeof(struct proxy_alias));
new->cookie_path_str = apr_strmatch_precompile(p, "path=", 0);
new->cookie_domain_str = apr_strmatch_precompile(p, "domain=", 0);
+ new->interpolate_env = -1; /* unset */
return (void *) new;
}
@@ -1024,6 +1142,8 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
= apr_array_append(p, base->cookie_domains, add->cookie_domains);
new->cookie_path_str = base->cookie_path_str;
new->cookie_domain_str = base->cookie_domain_str;
+ new->interpolate_env = (add->interpolate_env == -1) ? base->interpolate_env
+ : add->interpolate_env;
new->ftp_directory_charset = add->ftp_directory_charset ?
add->ftp_directory_charset :
base->ftp_directory_charset;
@@ -1140,6 +1260,9 @@ static const char *
else if (!strcasecmp(word,"nocanon")) {
flags |= PROXYPASS_NOCANON;
}
+ else if (!strcasecmp(word,"interpolate")) {
+ flags |= PROXYPASS_INTERPOLATE;
+ }
else {
char *val = strchr(word, '=');
if (!val) {
@@ -1237,31 +1360,41 @@ static const char *
}
-static const char *
- add_pass_reverse(cmd_parms *cmd, void *dconf, const char *f, const char *r)
+static const char * add_pass_reverse(cmd_parms *cmd, void *dconf, const char *f,
+ const char *r, const char *i)
{
proxy_dir_conf *conf = dconf;
struct proxy_alias *new;
-
- if (r!=NULL && cmd->path == NULL ) {
- new = apr_array_push(conf->raliases);
- new->fake = f;
- new->real = r;
- } else if (r==NULL && cmd->path != NULL) {
- new = apr_array_push(conf->raliases);
- new->fake = cmd->path;
- new->real = f;
- } else {
- if ( r == NULL)
+ const char *fake;
+ const char *real;
+ const char *interp;
+
+ if (cmd->path == NULL) {
+ fake = f;
+ real = r;
+ interp = i;
+ if (r == NULL || !strcasecmp(r, "interpolate")) {
return "ProxyPassReverse needs a path when not defined in a location";
- else
+ }
+ }
+ else {
+ fake = cmd->path;
+ real = f;
+ if (r && strcasecmp(r, "interpolate")) {
return "ProxyPassReverse can not have a path when defined in a location";
+ }
+ interp = r;
}
+ new = apr_array_push(conf->raliases);
+ new->fake = fake;
+ new->real = real;
+ new->flags = interp ? PROXYPASS_INTERPOLATE : 0;
+
return NULL;
}
-static const char*
- cookie_path(cmd_parms *cmd, void *dconf, const char *f, const char *r)
+static const char* cookie_path(cmd_parms *cmd, void *dconf, const char *f,
+ const char *r, const char *interp)
{
proxy_dir_conf *conf = dconf;
struct proxy_alias *new;
@@ -1269,11 +1402,12 @@ static const char*
new = apr_array_push(conf->cookie_paths);
new->fake = f;
new->real = r;
+ new->flags = interp ? PROXYPASS_INTERPOLATE : 0;
return NULL;
}
-static const char*
- cookie_domain(cmd_parms *cmd, void *dconf, const char *f, const char *r)
+static const char* cookie_domain(cmd_parms *cmd, void *dconf, const char *f,
+ const char *r, const char *interp)
{
proxy_dir_conf *conf = dconf;
struct proxy_alias *new;
@@ -1281,7 +1415,7 @@ static const char*
new = apr_array_push(conf->cookie_domains);
new->fake = f;
new->real = r;
-
+ new->flags = interp ? PROXYPASS_INTERPOLATE : 0;
return NULL;
}
@@ -1902,15 +2036,18 @@ static const command_rec proxy_cmds[] =
"a scheme, partial URL or '*' and a proxy server"),
AP_INIT_TAKE2("ProxyRemoteMatch", add_proxy_regex, NULL, RSRC_CONF,
"a regex pattern and a proxy server"),
+ AP_INIT_FLAG("ProxyPassInterpolateEnv", ap_set_flag_slot,
+ (void*)APR_OFFSETOF(proxy_dir_conf, interpolate_env),
+ RSRC_CONF|ACCESS_CONF, "Interpolate Env Vars in reverse Proxy") ,
AP_INIT_RAW_ARGS("ProxyPass", add_pass_noregex, NULL, RSRC_CONF|ACCESS_CONF,
"a virtual path and a URL"),
AP_INIT_RAW_ARGS("ProxyPassMatch", add_pass_regex, NULL, RSRC_CONF|ACCESS_CONF,
"a virtual path and a URL"),
- AP_INIT_TAKE12("ProxyPassReverse", add_pass_reverse, NULL, RSRC_CONF|ACCESS_CONF,
+ AP_INIT_TAKE123("ProxyPassReverse", add_pass_reverse, NULL, RSRC_CONF|ACCESS_CONF,
"a virtual path and a URL for reverse proxy behaviour"),
- AP_INIT_TAKE2("ProxyPassReverseCookiePath", cookie_path, NULL,
+ AP_INIT_TAKE23("ProxyPassReverseCookiePath", cookie_path, NULL,
RSRC_CONF|ACCESS_CONF, "Path rewrite rule for proxying cookies"),
- AP_INIT_TAKE2("ProxyPassReverseCookieDomain", cookie_domain, NULL,
+ AP_INIT_TAKE23("ProxyPassReverseCookieDomain", cookie_domain, NULL,
RSRC_CONF|ACCESS_CONF, "Domain rewrite rule for proxying cookies"),
AP_INIT_ITERATE("ProxyBlock", set_proxy_exclude, NULL, RSRC_CONF,
"A list of names, hosts or domains to which the proxy will not connect"),
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 3944104e..fdb48bf3 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -110,6 +110,7 @@ struct proxy_remote {
};
#define PROXYPASS_NOCANON 0x01
+#define PROXYPASS_INTERPOLATE 0x02
struct proxy_alias {
const char *real;
const char *fake;
@@ -213,14 +214,24 @@ typedef struct {
const apr_strmatch_pattern* cookie_path_str;
const apr_strmatch_pattern* cookie_domain_str;
const char *ftp_directory_charset;
+ int interpolate_env;
} proxy_dir_conf;
+/* if we interpolate env vars per-request, we'll need a per-request
+ * copy of the reverse proxy config
+ */
+typedef struct {
+ apr_array_header_t *raliases;
+ apr_array_header_t* cookie_paths;
+ apr_array_header_t* cookie_domains;
+} proxy_req_conf;
+
typedef struct {
conn_rec *connection;
const char *hostname;
apr_port_t port;
int is_ssl;
- apr_pool_t *pool; /* Subpool used for creating socket */
+ apr_pool_t *pool; /* Subpool for hostname and addr data */
apr_socket_t *sock; /* Connection socket */
apr_sockaddr_t *addr; /* Preparsed remote address info */
apr_uint32_t flags; /* Connection flags */
@@ -231,6 +242,11 @@ typedef struct {
#if APR_HAS_THREADS
int inreslist; /* connection in apr_reslist? */
#endif
+ apr_pool_t *scpool; /* Subpool used for socket and connection data */
+ request_rec *r; /* Request record of the frontend request
+ * which the backend currently answers. */
+ int need_flush; /* Flag to decide whether we need to flush the
+ * filter chain or not */
} proxy_conn_rec;
typedef struct {
@@ -337,6 +353,8 @@ struct proxy_worker {
apr_interval_time_t ping_timeout;
char ping_timeout_set;
char retry_set;
+ char disablereuse;
+ char disablereuse_set;
};
/*
@@ -473,6 +491,8 @@ PROXY_DECLARE(apr_status_t) ap_proxy_string_read(conn_rec *c, apr_bucket_brigade
PROXY_DECLARE(void) ap_proxy_table_unmerge(apr_pool_t *p, apr_table_t *t, char *key);
/* DEPRECATED (will be replaced with ap_proxy_connect_backend */
PROXY_DECLARE(int) ap_proxy_connect_to_backend(apr_socket_t **, const char *, apr_sockaddr_t *, const char *, proxy_server_conf *, server_rec *, apr_pool_t *);
+PROXY_DECLARE(apr_status_t) ap_proxy_ssl_connection_cleanup(proxy_conn_rec *conn,
+ request_rec *r);
PROXY_DECLARE(int) ap_proxy_ssl_enable(conn_rec *c);
PROXY_DECLARE(int) ap_proxy_ssl_disable(conn_rec *c);
PROXY_DECLARE(int) ap_proxy_conn_is_https(conn_rec *c);
diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c
index bad2b26e..80a6e0ab 100644
--- a/modules/proxy/mod_proxy_ajp.c
+++ b/modules/proxy/mod_proxy_ajp.c
@@ -29,7 +29,8 @@ module AP_MODULE_DECLARE_DATA proxy_ajp_module;
*/
static int proxy_ajp_canon(request_rec *r, char *url)
{
- char *host, *path, *search, sport[7];
+ char *host, *path, sport[7];
+ char *search = NULL;
const char *err;
apr_port_t port = AJP13_DEF_PORT;
@@ -57,23 +58,18 @@ static int proxy_ajp_canon(request_rec *r, char *url)
}
/*
- * now parse path/search args, according to rfc1738
- *
- * N.B. if this isn't a true proxy request, then the URL _path_
- * has already been decoded. True proxy requests have
- * r->uri == r->unparsed_uri, and no others have that property.
+ * now parse path/search args, according to rfc1738:
+ * process the path. With proxy-noncanon set (by
+ * mod_proxy) we use the raw, unparsed uri
*/
- if (r->uri == r->unparsed_uri) {
- search = strchr(url, '?');
- if (search != NULL)
- *(search++) = '\0';
+ if (apr_table_get(r->notes, "proxy-nocanon")) {
+ path = url; /* this is the raw path */
}
- else
+ else {
+ path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
+ r->proxyreq);
search = r->args;
-
- /* process path */
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
- r->proxyreq);
+ }
if (path == NULL)
return HTTP_BAD_REQUEST;
@@ -89,6 +85,37 @@ static int proxy_ajp_canon(request_rec *r, char *url)
return OK;
}
+#define METHOD_NON_IDEMPOTENT 0
+#define METHOD_IDEMPOTENT 1
+#define METHOD_IDEMPOTENT_WITH_ARGS 2
+
+static int is_idempotent(request_rec *r)
+{
+ /*
+ * RFC2616 (9.1.2): GET, HEAD, PUT, DELETE, OPTIONS, TRACE are considered
+ * idempotent. Hint: HEAD requests use M_GET as method number as well.
+ */
+ switch (r->method_number) {
+ case M_GET:
+ case M_DELETE:
+ case M_PUT:
+ case M_OPTIONS:
+ case M_TRACE:
+ /*
+ * If the request has arguments it might have side effects, and thus
+ * it might be undesirable to resend it to a backend
+ * automatically.
+ */
+ if (r->args) {
+ return METHOD_IDEMPOTENT_WITH_ARGS;
+ }
+ return METHOD_IDEMPOTENT;
+ /* Everything else is not considered idempotent. */
+ default:
+ return METHOD_NON_IDEMPOTENT;
+ }
+}
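
A short sketch of how the classification above is used further down in ap_proxy_ajp_request(): only a request that is fully idempotent and whose body has not yet been (partially) sent may be reported as HTTP_SERVICE_UNAVAILABLE so that a balancer can retry it on another member. The helper name is invented for illustration; the shipped code makes the decision inline.

    static int ajp_failure_status(request_rec *r, int send_body)
    {
        if (!send_body && is_idempotent(r) == METHOD_IDEMPOTENT) {
            return HTTP_SERVICE_UNAVAILABLE;   /* balancer may retry elsewhere */
        }
        return HTTP_INTERNAL_SERVER_ERROR;     /* fatal for this request */
    }
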
+
/*
* XXX: AJP Auto Flushing
*
@@ -122,7 +149,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
apr_bucket_brigade *input_brigade;
apr_bucket_brigade *output_brigade;
ajp_msg_t *msg;
- apr_size_t bufsiz;
+ apr_size_t bufsiz = 0;
char *buff;
apr_uint16_t size;
const char *tenc;
@@ -138,6 +165,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
proxy_server_conf *psf =
ap_get_module_config(r->server->module_config, &proxy_module);
apr_size_t maxsize = AJP_MSG_BUFFER_SZ;
+ int send_body = 0;
if (psf->io_buffer_size_set)
maxsize = psf->io_buffer_size;
@@ -161,8 +189,17 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
conn->worker->hostname);
if (status == AJP_EOVERFLOW)
return HTTP_BAD_REQUEST;
- else
- return HTTP_SERVICE_UNAVAILABLE;
+ else {
+ /*
+ * This is only non-fatal when the method is idempotent. In that
+ * case we can retry it with a different worker if we are
+ * a balancer member.
+ */
+ if (is_idempotent(r) == METHOD_IDEMPOTENT) {
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
}
/* allocate an AJP message to store the data of the buckets */
@@ -231,9 +268,14 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
"proxy: send failed to %pI (%s)",
conn->worker->cp->addr,
conn->worker->hostname);
- return HTTP_SERVICE_UNAVAILABLE;
+ /*
+ * It is fatal if we failed to send (part of) the request
+ * body.
+ */
+ return HTTP_INTERNAL_SERVER_ERROR;
}
conn->worker->s->transferred += bufsiz;
+ send_body = 1;
}
}
@@ -249,7 +291,16 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
"proxy: read response failed from %pI (%s)",
conn->worker->cp->addr,
conn->worker->hostname);
- return HTTP_SERVICE_UNAVAILABLE;
+ /*
+ * This is only non-fatal if we have not sent (parts of) a possible
+ * request body so far (we do not store it and thus cannot send it
+ * again) and the method is idempotent. In that case we can
+ * retry it with a different worker if we are a balancer member.
+ */
+ if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) {
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+ return HTTP_INTERNAL_SERVER_ERROR;
}
/* parse the response */
result = ajp_parse_type(r, conn->data);
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index d2ae88bb..bcc47cfc 100644
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -23,12 +23,16 @@
#include "ap_mpm.h"
#include "apr_version.h"
#include "apr_hooks.h"
+#include "apr_uuid.h"
module AP_MODULE_DECLARE_DATA proxy_balancer_module;
+static char balancer_nonce[APR_UUID_FORMATTED_LENGTH + 1];
+
static int proxy_balancer_canon(request_rec *r, char *url)
{
- char *host, *path, *search;
+ char *host, *path;
+ char *search = NULL;
const char *err;
apr_port_t port = 0;
@@ -52,21 +56,19 @@ static int proxy_balancer_canon(request_rec *r, char *url)
url, err);
return HTTP_BAD_REQUEST;
}
- /* now parse path/search args, according to rfc1738 */
- /* N.B. if this isn't a true proxy request, then the URL _path_
- * has already been decoded. True proxy requests have r->uri
- * == r->unparsed_uri, and no others have that property.
+ /*
+ * now parse path/search args, according to rfc1738:
+ * process the path. With proxy-noncanon set (by
+ * mod_proxy) we use the raw, unparsed uri
*/
- if (r->uri == r->unparsed_uri) {
- search = strchr(url, '?');
- if (search != NULL)
- *(search++) = '\0';
+ if (apr_table_get(r->notes, "proxy-nocanon")) {
+ path = url; /* this is the raw path */
}
- else
+ else {
+ path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
+ r->proxyreq);
search = r->args;
-
- /* process path */
- path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0, r->proxyreq);
+ }
if (path == NULL)
return HTTP_BAD_REQUEST;
@@ -589,6 +591,31 @@ static void recalc_factors(proxy_balancer *balancer)
}
}
+/* post_config hook: */
+static int balancer_init(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data;
+ const char *userdata_key = "mod_proxy_balancer_init";
+ apr_uuid_t uuid;
+
+ /* balancer_init() will be called twice during startup. So, only
+ * set up the static data the second time through. */
+ apr_pool_userdata_get(&data, userdata_key, s->process->pool);
+ if (!data) {
+ apr_pool_userdata_set((const void *)1, userdata_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return OK;
+ }
+
+ /* Retrieve a UUID and store the nonce for the lifetime of
+ * the process. */
+ apr_uuid_get(&uuid);
+ apr_uuid_format(balancer_nonce, &uuid);
+
+ return OK;
+}
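
The nonce generated here is later embedded in every link and form the balancer-manager emits, and balancer_handler() clears all parameters when a request arrives without it. A condensed sketch of how such a link is assembled (the helper name, balancer_name and worker_name are placeholders; the handler code below builds the string inline):

    static const char *manager_link(request_rec *r, const char *balancer_name,
                                    const char *worker_name)
    {
        /* a request built from this link carries the per-process nonce and
         * therefore passes the CSRF check in balancer_handler() */
        return apr_pstrcat(r->pool, r->uri,
                           "?b=", balancer_name,
                           "&w=", ap_escape_uri(r->pool, worker_name),
                           "&nonce=", balancer_nonce, NULL);
    }
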
+
/* Manages the loadfactors and member status
*/
static int balancer_handler(request_rec *r)
@@ -631,6 +658,14 @@ static int balancer_handler(request_rec *r)
return HTTP_BAD_REQUEST;
}
}
+
+ /* Check that the supplied nonce matches this server's nonce;
+ * otherwise ignore all parameters, to prevent a CSRF attack. */
+ if ((name = apr_table_get(params, "nonce")) == NULL
+ || strcmp(balancer_nonce, name) != 0) {
+ apr_table_clear(params);
+ }
+
if ((name = apr_table_get(params, "b")))
bsel = ap_proxy_get_balancer(r->pool, conf,
apr_pstrcat(r->pool, "balancer://", name, NULL));
@@ -762,6 +797,7 @@ static int balancer_handler(request_rec *r)
ap_rvputs(r, "<tr>\n<td><a href=\"", r->uri, "?b=",
balancer->name + sizeof("balancer://") - 1, "&w=",
ap_escape_uri(r->pool, worker->name),
+ "&nonce=", balancer_nonce,
"\">", NULL);
ap_rvputs(r, worker->name, "</a></td>", NULL);
ap_rvputs(r, "<td>", ap_escape_html(r->pool, worker->s->route),
@@ -825,6 +861,8 @@ static int balancer_handler(request_rec *r)
ap_rvputs(r, "<input type=hidden name=\"b\" ", NULL);
ap_rvputs(r, "value=\"", bsel->name + sizeof("balancer://") - 1,
"\">\n</form>\n", NULL);
+ ap_rvputs(r, "<input type=hidden name=\"nonce\" value=\"",
+ balancer_nonce, "\">\n", NULL);
ap_rputs("<hr />\n", r);
}
ap_rputs(ap_psignature("",r), r);
@@ -1063,6 +1101,7 @@ static void ap_proxy_balancer_register_hook(apr_pool_t *p)
*/
static const char *const aszPred[] = { "mpm_winnt.c", "mod_proxy.c", NULL};
/* manager handler */
+ ap_hook_post_config(balancer_init, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_handler(balancer_handler, NULL, NULL, APR_HOOK_FIRST);
ap_hook_child_init(child_init, aszPred, NULL, APR_HOOK_MIDDLE);
proxy_hook_pre_request(proxy_balancer_pre_request, NULL, NULL, APR_HOOK_FIRST);
diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
index 3cacac6e..75a2054e 100644
--- a/modules/proxy/mod_proxy_ftp.c
+++ b/modules/proxy/mod_proxy_ftp.c
@@ -314,6 +314,7 @@ static apr_status_t proxy_send_dir_filter(ap_filter_t *f,
/* basedir is either "", or "/%2f" for the "squid %2f hack" */
const char *basedir = ""; /* By default, path is relative to the $HOME dir */
char *wildcard = NULL;
+ const char *escpath;
/* Save "scheme://site" prefix without password */
site = apr_uri_unparse(p, &f->r->parsed_uri, APR_URI_UNP_OMITPASSWORD | APR_URI_UNP_OMITPATHINFO);
@@ -350,13 +351,14 @@ static apr_status_t proxy_send_dir_filter(ap_filter_t *f,
str = (basedir[0] != '\0') ? "<a href=\"/%2f/\">%2f</a>/" : "";
/* print "ftp://host/" */
+ escpath = ap_escape_html(p, path);
str = apr_psprintf(p, DOCTYPE_HTML_3_2
"<html>\n <head>\n <title>%s%s%s</title>\n"
+ "<base href=\"%s%s%s\">\n"
" </head>\n"
" <body>\n <h2>Directory of "
"<a href=\"/\">%s</a>/%s",
- site, basedir, ap_escape_html(p, path),
- site, str);
+ site, basedir, escpath, site, basedir, escpath, site, str);
APR_BRIGADE_INSERT_TAIL(out, apr_bucket_pool_create(str, strlen(str),
p, c->bucket_alloc));
@@ -959,6 +961,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
}
/* TODO: see if ftp could use determine_connection */
backend->addr = connect_addr;
+ backend->r = r;
ap_set_module_config(c->conn_config, &proxy_ftp_module, backend);
}
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index e5f654bb..3ca21895 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -33,7 +33,8 @@ static apr_status_t ap_proxy_http_cleanup(const char *scheme,
*/
static int proxy_http_canon(request_rec *r, char *url)
{
- char *host, *path, *search, sport[7];
+ char *host, *path, sport[7];
+ char *search = NULL;
const char *err;
const char *scheme;
apr_port_t port, def_port;
@@ -67,21 +68,11 @@ static int proxy_http_canon(request_rec *r, char *url)
return HTTP_BAD_REQUEST;
}
- /* now parse path/search args, according to rfc1738 */
- /* N.B. if this isn't a true proxy request, then the URL _path_
- * has already been decoded. True proxy requests have r->uri
- * == r->unparsed_uri, and no others have that property.
- */
- if (r->uri == r->unparsed_uri) {
- search = strchr(url, '?');
- if (search != NULL)
- *(search++) = '\0';
- }
- else
- search = r->args;
-
- /* process path */
- /* In a reverse proxy, our URL has been processed, so canonicalise
+ /*
+ * now parse path/search args, according to rfc1738:
+ * process the path.
+ *
+ * In a reverse proxy, our URL has been processed, so canonicalise
* unless proxy-nocanon is set to say it's raw
* In a forward proxy, we have and MUST NOT MANGLE the original.
*/
@@ -94,6 +85,7 @@ static int proxy_http_canon(request_rec *r, char *url)
else {
path = ap_proxy_canonenc(r->pool, url, strlen(url),
enc_path, 0, r->proxyreq);
+ search = r->args;
}
break;
case PROXYREQ_PROXY:
@@ -259,7 +251,7 @@ static void terminate_headers(apr_bucket_alloc_t *bucket_alloc,
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
}
-static apr_status_t pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+static int pass_brigade(apr_bucket_alloc_t *bucket_alloc,
request_rec *r, proxy_conn_rec *conn,
conn_rec *origin, apr_bucket_brigade *bb,
int flush)
@@ -279,22 +271,27 @@ static apr_status_t pass_brigade(apr_bucket_alloc_t *bucket_alloc,
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: pass request body failed to %pI (%s)",
conn->addr, conn->hostname);
- return status;
+ if (origin->aborted) {
+ return APR_STATUS_IS_TIMEUP(status) ? HTTP_GATEWAY_TIME_OUT : HTTP_BAD_GATEWAY;
+ }
+ else {
+ return HTTP_BAD_REQUEST;
+ }
}
apr_brigade_cleanup(bb);
- return APR_SUCCESS;
+ return OK;
}
#define MAX_MEM_SPOOL 16384
-static apr_status_t stream_reqbody_chunked(apr_pool_t *p,
+static int stream_reqbody_chunked(apr_pool_t *p,
request_rec *r,
proxy_conn_rec *p_conn,
conn_rec *origin,
apr_bucket_brigade *header_brigade,
apr_bucket_brigade *input_brigade)
{
- int seen_eos = 0;
+ int seen_eos = 0, rv = OK;
apr_size_t hdr_len;
apr_off_t bytes;
apr_status_t status;
@@ -352,7 +349,7 @@ static apr_status_t stream_reqbody_chunked(apr_pool_t *p,
*/
status = ap_save_brigade(NULL, &bb, &input_brigade, p);
if (status != APR_SUCCESS) {
- return status;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
header_brigade = NULL;
@@ -362,9 +359,9 @@ static apr_status_t stream_reqbody_chunked(apr_pool_t *p,
}
/* The request is flushed below this loop with chunk EOS header */
- status = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0);
- if (status != APR_SUCCESS) {
- return status;
+ rv = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0);
+ if (rv != OK) {
+ return rv;
}
if (seen_eos) {
@@ -376,7 +373,7 @@ static apr_status_t stream_reqbody_chunked(apr_pool_t *p,
HUGE_STRING_LEN);
if (status != APR_SUCCESS) {
- return status;
+ return HTTP_BAD_REQUEST;
}
}
@@ -403,11 +400,11 @@ static apr_status_t stream_reqbody_chunked(apr_pool_t *p,
APR_BRIGADE_INSERT_TAIL(bb, e);
/* Now we have headers-only, or the chunk EOS mark; flush it */
- status = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1);
- return status;
+ rv = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1);
+ return rv;
}
-static apr_status_t stream_reqbody_cl(apr_pool_t *p,
+static int stream_reqbody_cl(apr_pool_t *p,
request_rec *r,
proxy_conn_rec *p_conn,
conn_rec *origin,
@@ -415,7 +412,7 @@ static apr_status_t stream_reqbody_cl(apr_pool_t *p,
apr_bucket_brigade *input_brigade,
const char *old_cl_val)
{
- int seen_eos = 0;
+ int seen_eos = 0, rv = 0;
apr_status_t status = APR_SUCCESS;
apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc;
apr_bucket_brigade *bb;
@@ -428,7 +425,7 @@ static apr_status_t stream_reqbody_cl(apr_pool_t *p,
add_cl(p, bucket_alloc, header_brigade, old_cl_val);
if (APR_SUCCESS != (status = apr_strtoff(&cl_val, old_cl_val, NULL,
0))) {
- return status;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
}
terminate_headers(bucket_alloc, header_brigade);
@@ -476,7 +473,7 @@ static apr_status_t stream_reqbody_cl(apr_pool_t *p,
*/
status = ap_save_brigade(NULL, &bb, &input_brigade, p);
if (status != APR_SUCCESS) {
- return status;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
header_brigade = NULL;
@@ -486,9 +483,9 @@ static apr_status_t stream_reqbody_cl(apr_pool_t *p,
}
/* Once we hit EOS, we are ready to flush. */
- status = pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos);
- if (status != APR_SUCCESS) {
- return status;
+ rv = pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos);
+ if (rv != OK) {
+ return rv;
}
if (seen_eos) {
@@ -500,7 +497,7 @@ static apr_status_t stream_reqbody_cl(apr_pool_t *p,
HUGE_STRING_LEN);
if (status != APR_SUCCESS) {
- return status;
+ return HTTP_BAD_REQUEST;
}
}
@@ -508,7 +505,7 @@ static apr_status_t stream_reqbody_cl(apr_pool_t *p,
ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
"proxy: client %s given Content-Length did not match"
" number of body bytes read", r->connection->remote_ip);
- return APR_EOF;
+ return HTTP_BAD_REQUEST;
}
if (header_brigade) {
@@ -516,12 +513,13 @@ static apr_status_t stream_reqbody_cl(apr_pool_t *p,
* body; send it now with the flush flag
*/
bb = header_brigade;
- status = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1);
+ return(pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1));
}
- return status;
+
+ return OK;
}
-static apr_status_t spool_reqbody_cl(apr_pool_t *p,
+static int spool_reqbody_cl(apr_pool_t *p,
request_rec *r,
proxy_conn_rec *p_conn,
conn_rec *origin,
@@ -562,7 +560,7 @@ static apr_status_t spool_reqbody_cl(apr_pool_t *p,
if (status != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: search for temporary directory failed");
- return status;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
apr_filepath_merge(&template, temp_dir,
"modproxy.tmp.XXXXXX",
@@ -572,7 +570,7 @@ static apr_status_t spool_reqbody_cl(apr_pool_t *p,
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: creation of temporary file in directory %s failed",
temp_dir);
- return status;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
}
for (e = APR_BRIGADE_FIRST(input_brigade);
@@ -592,7 +590,7 @@ static apr_status_t spool_reqbody_cl(apr_pool_t *p,
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: write to temporary file %s failed",
tmpfile_name);
- return status;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
AP_DEBUG_ASSERT(bytes_read == bytes_written);
fsize += bytes_written;
@@ -612,7 +610,7 @@ static apr_status_t spool_reqbody_cl(apr_pool_t *p,
*/
status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p);
if (status != APR_SUCCESS) {
- return status;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
}
@@ -628,7 +626,7 @@ static apr_status_t spool_reqbody_cl(apr_pool_t *p,
HUGE_STRING_LEN);
if (status != APR_SUCCESS) {
- return status;
+ return HTTP_BAD_REQUEST;
}
}
@@ -662,12 +660,11 @@ static apr_status_t spool_reqbody_cl(apr_pool_t *p,
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
}
/* This is all a single brigade, pass with flush flagged */
- status = pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1);
- return status;
+ return(pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1));
}
static
-apr_status_t ap_proxy_http_request(apr_pool_t *p, request_rec *r,
+int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
proxy_conn_rec *p_conn, conn_rec *origin,
proxy_server_conf *conf,
apr_uri_t *uri,
@@ -690,7 +687,7 @@ apr_status_t ap_proxy_http_request(apr_pool_t *p, request_rec *r,
const char *old_te_val = NULL;
apr_off_t bytes_read = 0;
apr_off_t bytes;
- int force10;
+ int force10, rv;
apr_table_t *headers_in_copy;
header_brigade = apr_brigade_create(p, origin->bucket_alloc);
@@ -932,7 +929,7 @@ apr_status_t ap_proxy_http_request(apr_pool_t *p, request_rec *r,
ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
"proxy: %s Transfer-Encoding is not supported",
old_te_val);
- return APR_EINVAL;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
if (old_cl_val && old_te_val) {
@@ -965,7 +962,7 @@ apr_status_t ap_proxy_http_request(apr_pool_t *p, request_rec *r,
" from %s (%s)",
p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
c->remote_ip, c->remote_host ? c->remote_host: "");
- return status;
+ return HTTP_BAD_REQUEST;
}
apr_brigade_length(temp_brigade, 1, &bytes);
@@ -987,7 +984,7 @@ apr_status_t ap_proxy_http_request(apr_pool_t *p, request_rec *r,
" to %pI (%s) from %s (%s)",
p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
c->remote_ip, c->remote_host ? c->remote_host: "");
- return status;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
/* Ensure we don't hit a wall where we have a buffer too small
@@ -1101,37 +1098,38 @@ skip_body:
/* send the request body, if any. */
switch(rb_method) {
case RB_STREAM_CHUNKED:
- status = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade,
+ rv = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade,
input_brigade);
break;
case RB_STREAM_CL:
- status = stream_reqbody_cl(p, r, p_conn, origin, header_brigade,
+ rv = stream_reqbody_cl(p, r, p_conn, origin, header_brigade,
input_brigade, old_cl_val);
break;
case RB_SPOOL_CL:
- status = spool_reqbody_cl(p, r, p_conn, origin, header_brigade,
+ rv = spool_reqbody_cl(p, r, p_conn, origin, header_brigade,
input_brigade, (old_cl_val != NULL)
|| (old_te_val != NULL)
|| (bytes_read > 0));
break;
default:
/* shouldn't be possible */
- status = APR_EINVAL;
+ rv = HTTP_INTERNAL_SERVER_ERROR;
break;
}
- if (status != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
+ if (rv != OK) {
+ /* apr_errno value has been logged in lower level method */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
"proxy: pass request body failed to %pI (%s)"
" from %s (%s)",
p_conn->addr,
p_conn->hostname ? p_conn->hostname: "",
c->remote_ip,
c->remote_host ? c->remote_host: "");
- return status;
+ return rv;
}
- return APR_SUCCESS;
+ return OK;
}
static void process_proxy_header(request_rec* r, proxy_dir_conf* c,
@@ -1309,6 +1307,16 @@ apr_status_t ap_proxygetline(apr_bucket_brigade *bb, char *s, int n, request_rec
return rv;
}
+/*
+ * Limit the number of interim responses we send back to the client. Otherwise
+ * we suffer from memory build-up. Besides, there is NO sense in sending back
+ * an unlimited number of interim responses to the client. Thus, if we cross
+ * this limit, send back a 502 (Bad Gateway).
+ */
+#ifndef AP_MAX_INTERIM_RESPONSES
+#define AP_MAX_INTERIM_RESPONSES 10
+#endif
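
A minimal sketch, under an assumed helper name, of the bounded loop this macro feeds into: interim responses are counted rather than merely flagged, and once the cap is reached the proxy answers with 502 instead of reading yet another 1xx.

    static int drain_interim_responses(request_rec *r, proxy_conn_rec *backend)
    {
        int interim = 0;
        int status;

        do {
            status = read_backend_status_line(r, backend);  /* hypothetical */
            interim = ap_is_HTTP_INFO(status) ? interim + 1 : 0;
        } while (interim && interim < AP_MAX_INTERIM_RESPONSES);

        if (interim >= AP_MAX_INTERIM_RESPONSES) {
            return HTTP_BAD_GATEWAY;    /* refuse an unbounded stream of 1xx */
        }
        return status;
    }
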
+
static
apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
proxy_conn_rec *backend,
@@ -1323,14 +1331,15 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
apr_bucket *e;
apr_bucket_brigade *bb, *tmp_bb;
int len, backasswards;
- int interim_response; /* non-zero whilst interim 1xx responses
- * are being read. */
+ int interim_response = 0; /* non-zero whilst interim 1xx responses
+ * are being read. */
int pread_len = 0;
apr_table_t *save_table;
int backend_broke = 0;
static const char *hop_by_hop_hdrs[] =
{"Keep-Alive", "Proxy-Authenticate", "TE", "Trailer", "Upgrade", NULL};
int i;
+ const char *te = NULL;
bb = apr_brigade_create(p, c->bucket_alloc);
@@ -1358,6 +1367,56 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r,
"proxy: error reading status line from remote "
"server %s", backend->hostname);
+ /*
+ * If this is a reverse proxy request, shut down the connection
+ * WITHOUT ANY response to trigger a retry by the client
+ * if allowed (as for idempotent requests).
+ * BUT currently we should not do this if the request is the
+ * first request on a keepalive connection, as browsers like
+ * SeaMonkey only display an empty page in this case and do
+ * not retry.
+ */
+ if (r->proxyreq == PROXYREQ_REVERSE && c->keepalives) {
+ apr_bucket *eos;
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "proxy: Closing connection to client because"
+ " reading from backend server %s failed. Number"
+ " of keepalives %i", backend->hostname,
+ c->keepalives);
+ ap_proxy_backend_broke(r, bb);
+ /*
+ * Add an EOC bucket to signal the ap_http_header_filter
+ * that it should get out of our way, BUT ensure that the
+ * EOC bucket is inserted BEFORE an EOS bucket in bb as
+ * some resource filters like mod_deflate pass everything
+ * up to the EOS down the chain immediately and send the
+ * remainder of the brigade later (or even never). But in
+ * this case the ap_http_header_filter does not get out of
+ * our way soon enough.
+ */
+ e = ap_bucket_eoc_create(c->bucket_alloc);
+ eos = APR_BRIGADE_LAST(bb);
+ while ((APR_BRIGADE_SENTINEL(bb) != eos)
+ && !APR_BUCKET_IS_EOS(eos)) {
+ eos = APR_BUCKET_PREV(eos);
+ }
+ if (eos == APR_BRIGADE_SENTINEL(bb)) {
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+ else {
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+ ap_pass_brigade(r->output_filters, bb);
+ /* Need to return OK to avoid sending an error message */
+ return OK;
+ }
+ else if (!c->keepalives) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "proxy: NOT Closing connection to client"
+ " although reading from backend server %s"
+ " failed.", backend->hostname);
+ }
return ap_proxyerror(r, HTTP_BAD_GATEWAY,
"Error reading from remote server");
}
@@ -1461,6 +1520,11 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
backend->close += 1;
}
+ /*
+ * Save a possible Transfer-Encoding header as we need it later for
+ * ap_http_filter to know where to end.
+ */
+ te = apr_table_get(r->headers_out, "Transfer-Encoding");
/* strip connection listed hop-by-hop headers from response */
backend->close += ap_proxy_liststr(apr_table_get(r->headers_out,
"Connection"),
@@ -1469,7 +1533,9 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
ap_set_content_type(r, apr_pstrdup(p, buf));
}
- ap_proxy_pre_http_request(origin,rp);
+ if (!ap_is_HTTP_INFO(r->status)) {
+ ap_proxy_pre_http_request(origin, rp);
+ }
/* Clear hop-by-hop headers */
for (i=0; hop_by_hop_hdrs[i]; ++i) {
@@ -1518,7 +1584,12 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
backend->close += 1;
}
- interim_response = ap_is_HTTP_INFO(r->status);
+ if (ap_is_HTTP_INFO(r->status)) {
+ interim_response++;
+ }
+ else {
+ interim_response = 0;
+ }
if (interim_response) {
/* RFC2616 tells us to forward this.
*
@@ -1601,6 +1672,14 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
* ap_http_filter to know where to end.
*/
rp->headers_in = apr_table_copy(r->pool, r->headers_out);
+ /*
+ * Restore Transfer-Encoding header from response if we saved
+ * one before and there is none left. We need it for the
+ * ap_http_filter. See above.
+ */
+ if (te && !apr_table_get(rp->headers_in, "Transfer-Encoding")) {
+ apr_table_add(rp->headers_in, "Transfer-Encoding", te);
+ }
apr_table_unset(r->headers_out,"Transfer-Encoding");
@@ -1711,7 +1790,15 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
apr_brigade_cleanup(bb);
}
- } while (interim_response);
+ } while (interim_response && (interim_response < AP_MAX_INTERIM_RESPONSES));
+
+ /* See define of AP_MAX_INTERIM_RESPONSES for why */
+ if (interim_response >= AP_MAX_INTERIM_RESPONSES) {
+ return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ apr_psprintf(p,
+ "Too many (%d) interim responses from origin server",
+ interim_response));
+ }
/* If our connection with the client is to be aborted, return DONE. */
if (c->aborted || backend_broke) {
@@ -1830,14 +1917,9 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
backend->is_ssl = is_ssl;
- /*
- * TODO: Currently we cannot handle persistent SSL backend connections,
- * because we recreate backend->connection for each request and thus
- * try to initialize an already existing SSL connection. This does
- * not work.
- */
- if (is_ssl)
- backend->close_on_recycle = 1;
+ if (is_ssl) {
+ ap_proxy_ssl_connection_cleanup(backend, r);
+ }
/* Step One: Determine Who To Connect To */
if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend,
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 910f3610..e8309e46 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -332,16 +332,16 @@ PROXY_DECLARE(const char *)
PROXY_DECLARE(request_rec *)ap_proxy_make_fake_req(conn_rec *c, request_rec *r)
{
- request_rec *rp = apr_pcalloc(c->pool, sizeof(*r));
+ request_rec *rp = apr_pcalloc(r->pool, sizeof(*r));
- rp->pool = c->pool;
+ rp->pool = r->pool;
rp->status = HTTP_OK;
- rp->headers_in = apr_table_make(c->pool, 50);
- rp->subprocess_env = apr_table_make(c->pool, 50);
- rp->headers_out = apr_table_make(c->pool, 12);
- rp->err_headers_out = apr_table_make(c->pool, 5);
- rp->notes = apr_table_make(c->pool, 5);
+ rp->headers_in = apr_table_make(r->pool, 50);
+ rp->subprocess_env = apr_table_make(r->pool, 50);
+ rp->headers_out = apr_table_make(r->pool, 12);
+ rp->err_headers_out = apr_table_make(r->pool, 5);
+ rp->notes = apr_table_make(r->pool, 5);
rp->server = r->server;
rp->proxyreq = r->proxyreq;
@@ -352,7 +352,7 @@ PROXY_DECLARE(request_rec *)ap_proxy_make_fake_req(conn_rec *c, request_rec *r)
rp->proto_output_filters = c->output_filters;
rp->proto_input_filters = c->input_filters;
- rp->request_config = ap_create_request_config(c->pool);
+ rp->request_config = ap_create_request_config(r->pool);
proxy_run_create_req(r, rp);
return rp;
@@ -1041,6 +1041,7 @@ PROXY_DECLARE(void) ap_proxy_table_unmerge(apr_pool_t *p, apr_table_t *t, char *
PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r,
proxy_dir_conf *conf, const char *url)
{
+ proxy_req_conf *rconf;
struct proxy_alias *ent;
int i, l1, l2;
char *u;
@@ -1049,12 +1050,67 @@ PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r,
* XXX FIXME: Make sure this handled the ambiguous case of the :<PORT>
* after the hostname
*/
+ if (r->proxyreq != PROXYREQ_REVERSE) {
+ return url;
+ }
l1 = strlen(url);
- ent = (struct proxy_alias *)conf->raliases->elts;
+ if (conf->interpolate_env == 1) {
+ rconf = ap_get_module_config(r->request_config, &proxy_module);
+ ent = (struct proxy_alias *)rconf->raliases->elts;
+ }
+ else {
+ ent = (struct proxy_alias *)conf->raliases->elts;
+ }
for (i = 0; i < conf->raliases->nelts; i++) {
- l2 = strlen(ent[i].real);
- if (l1 >= l2 && strncasecmp(ent[i].real, url, l2) == 0) {
+ proxy_server_conf *sconf = (proxy_server_conf *)
+ ap_get_module_config(r->server->module_config, &proxy_module);
+ proxy_balancer *balancer;
+ const char *real;
+ real = ent[i].real;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "ppr: real: %s", real);
+ /*
+ * First check if mapping against a balancer and see
+ * if we have such an entity. If so, then we need to
+ * find the particulars of the actual worker which may
+ * or may not be the right one... basically, we need
+ * to find which member actually handled this request.
+ */
+ if ((strncasecmp(real, "balancer:", 9) == 0) &&
+ (balancer = ap_proxy_get_balancer(r->pool, sconf, real))) {
+ int n;
+ proxy_worker *worker;
+ worker = (proxy_worker *)balancer->workers->elts;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "ppr: checking balancer: %s",
+ balancer->name);
+ for (n = 0; n < balancer->workers->nelts; n++) {
+ if (worker->port) {
+ u = apr_psprintf(r->pool, "%s://%s:%d/", worker->scheme,
+ worker->hostname, worker->port);
+ }
+ else {
+ u = apr_psprintf(r->pool, "%s://%s/", worker->scheme,
+ worker->hostname);
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "ppr: matching member (%s) and URL (%s)",
+ u, url);
+
+ l2 = strlen(u);
+ if (l1 >= l2 && strncasecmp(u, url, l2) == 0) {
+ u = apr_pstrcat(r->pool, ent[i].fake, &url[l2], NULL);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "ppr: matched member (%s)", u);
+ return ap_construct_url(r->pool, u, r);
+ }
+ worker++;
+ }
+ }
+
+ l2 = strlen(real);
+ if (l1 >= l2 && strncasecmp(real, url, l2) == 0) {
u = apr_pstrcat(r->pool, ent[i].fake, &url[l2], NULL);
return ap_construct_url(r->pool, u, r);
}
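
For the common non-balancer branch above, the mapping boils down to a case-insensitive prefix match on the configured real URL followed by splicing the remainder onto the fake prefix. A standalone sketch, with illustrative names and hostnames and no balancer handling:

    static const char *reverse_map_one(request_rec *r, const char *fake,
                                       const char *real, const char *url)
    {
        apr_size_t l_real = strlen(real);

        if (strlen(url) >= l_real && strncasecmp(real, url, l_real) == 0) {
            /* e.g. fake "/app/", real "http://backend.internal/app/",
             * url  "http://backend.internal/app/login"  ->  "/app/login"
             * rebuilt as an absolute URL on this server */
            return ap_construct_url(r->pool,
                                    apr_pstrcat(r->pool, fake, url + l_real,
                                                NULL),
                                    r);
        }
        return url;   /* no mapping applies */
    }
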
@@ -1073,6 +1129,8 @@ PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r,
PROXY_DECLARE(const char *) ap_proxy_cookie_reverse_map(request_rec *r,
proxy_dir_conf *conf, const char *str)
{
+ proxy_req_conf *rconf = ap_get_module_config(r->request_config,
+ &proxy_module);
struct proxy_alias *ent;
size_t len = strlen(str);
const char *newpath = NULL;
@@ -1087,6 +1145,10 @@ PROXY_DECLARE(const char *) ap_proxy_cookie_reverse_map(request_rec *r,
int pdiff = 0;
char *ret;
+ if (r->proxyreq != PROXYREQ_REVERSE) {
+ return str;
+ }
+
/*
* Find the match and replacement, but save replacing until we've done
* both path and domain so we know the new strlen
@@ -1097,7 +1159,12 @@ PROXY_DECLARE(const char *) ap_proxy_cookie_reverse_map(request_rec *r,
pathe = ap_strchr_c(pathp, ';');
l1 = pathe ? (pathe - pathp) : strlen(pathp);
pathe = pathp + l1 ;
- ent = (struct proxy_alias *)conf->cookie_paths->elts;
+ if (conf->interpolate_env == 1) {
+ ent = (struct proxy_alias *)rconf->cookie_paths->elts;
+ }
+ else {
+ ent = (struct proxy_alias *)conf->cookie_paths->elts;
+ }
for (i = 0; i < conf->cookie_paths->nelts; i++) {
l2 = strlen(ent[i].fake);
if (l1 >= l2 && strncmp(ent[i].fake, pathp, l2) == 0) {
@@ -1114,7 +1181,12 @@ PROXY_DECLARE(const char *) ap_proxy_cookie_reverse_map(request_rec *r,
domaine = ap_strchr_c(domainp, ';');
l1 = domaine ? (domaine - domainp) : strlen(domainp);
domaine = domainp + l1;
- ent = (struct proxy_alias *)conf->cookie_domains->elts;
+ if (conf->interpolate_env == 1) {
+ ent = (struct proxy_alias *)rconf->cookie_domains->elts;
+ }
+ else {
+ ent = (struct proxy_alias *)conf->cookie_domains->elts;
+ }
for (i = 0; i < conf->cookie_domains->nelts; i++) {
l2 = strlen(ent[i].fake);
if (l1 >= l2 && strncasecmp(ent[i].fake, domainp, l2) == 0) {
@@ -1323,6 +1395,7 @@ static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
* it can be disabled.
*/
apr_pool_create(&pool, p);
+ apr_pool_tag(pool, "proxy_worker_cp");
/*
* Alloc from the same pool as worker.
* proxy_conn_pool is permanently attached to the worker.
@@ -1550,6 +1623,9 @@ static apr_status_t connection_cleanup(void *theconn)
{
proxy_conn_rec *conn = (proxy_conn_rec *)theconn;
proxy_worker *worker = conn->worker;
+ apr_bucket_brigade *bb;
+ conn_rec *c;
+ request_rec *r;
/*
* If the connection pool is NULL the worker
@@ -1570,13 +1646,67 @@ static apr_status_t connection_cleanup(void *theconn)
}
#endif
+ r = conn->r;
+ if (conn->need_flush && r && (r->bytes_sent || r->eos_sent)) {
+ /*
+ * We need to ensure that buckets that may have been buffered in the
+ * network filters get flushed to the network. This is needed since
+ * these buckets have been created with the bucket allocator of the
+ * backend connection. This allocator either gets destroyed (if
+ * conn->close is set, or if the worker address is not reusable,
+ * which causes the connection to the backend to be closed), or it
+ * will be used again by another frontend connection that wants to
+ * recycle the backend connection.
+ * In the latter case we could run into nasty race conditions (e.g. if
+ * the next user of the backend connection destroys the allocator
+ * before we have sent the buckets to the network).
+ *
+ * Remark 1: Only do this if buckets were sent down the chain before
+ * that could still be buffered in the network filter. This is the case
+ * if we have sent an EOS bucket or if we actually sent buckets with
+ * data down the chain. In all other cases we either have not sent any
+ * buckets at all down the chain or we only sent meta buckets that are
+ * not EOS buckets down the chain. The only meta bucket that remains in
+ * this case is the flush bucket, which would have removed all possibly
+ * buffered buckets in the network filter.
+ * If we sent a flush bucket in the case where no buckets at all were
+ * sent down the chain, we would break error handling that happens AFTER us.
+ *
+ * Remark 2: Doing a setaside does not help here as the buckets remain
+ * created by the wrong allocator in this case.
+ *
+ * Remark 3: Yes, this creates a possible performance penalty in the case
+ * of pipelined requests as we may send only a small amount of data over
+ * the wire.
+ */
+ c = r->connection;
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ if (r->eos_sent) {
+ /*
+ * If we have already sent an EOS bucket send directly to the
+ * connection based filters. We just want to flush the buckets
+ * if something hasn't been sent to the network yet.
+ */
+ ap_fflush(c->output_filters, bb);
+ }
+ else {
+ ap_fflush(r->output_filters, bb);
+ }
+ apr_brigade_destroy(bb);
+ conn->r = NULL;
+ conn->need_flush = 0;
+ }
+
/* determine if the connection need to be closed */
- if (conn->close_on_recycle || conn->close) {
+ if (conn->close_on_recycle || conn->close || worker->disablereuse ||
+ !worker->is_address_reusable) {
apr_pool_t *p = conn->pool;
- apr_pool_clear(conn->pool);
- memset(conn, 0, sizeof(proxy_conn_rec));
+ apr_pool_clear(p);
+ conn = apr_pcalloc(p, sizeof(proxy_conn_rec));
conn->pool = p;
conn->worker = worker;
+ apr_pool_create(&(conn->scpool), p);
+ apr_pool_tag(conn->scpool, "proxy_conn_scpool");
}
#if APR_HAS_THREADS
if (worker->hmax && worker->cp->res) {
@@ -1593,11 +1723,54 @@ static apr_status_t connection_cleanup(void *theconn)
return APR_SUCCESS;
}
+static void socket_cleanup(proxy_conn_rec *conn)
+{
+ conn->sock = NULL;
+ conn->connection = NULL;
+ apr_pool_clear(conn->scpool);
+}
+
+PROXY_DECLARE(apr_status_t) ap_proxy_ssl_connection_cleanup(proxy_conn_rec *conn,
+ request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ apr_status_t rv;
+
+ /*
+ * If we have an existing SSL connection, it is possible that the
+ * server sent some SSL message we have not read so far (e.g. an SSL
+ * shutdown message if the server closed the keepalive connection while
+ * the connection was held unused in our pool).
+ * So ensure that if present (=> APR_NONBLOCK_READ) it is read and
+ * processed. We don't expect any data to be in the returned brigade.
+ */
+ if (conn->sock && conn->connection) {
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ rv = ap_get_brigade(conn->connection->input_filters, bb,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ HUGE_STRING_LEN);
+ if ((rv != APR_SUCCESS) && !APR_STATUS_IS_EAGAIN(rv)) {
+ socket_cleanup(conn);
+ }
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ apr_off_t len;
+
+ rv = apr_brigade_length(bb, 0, &len);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "proxy: SSL cleanup brigade contained %"
+ APR_OFF_T_FMT " bytes of data.", len);
+ }
+ apr_brigade_destroy(bb);
+ }
+ return APR_SUCCESS;
+}
+
/* reslist constructor */
static apr_status_t connection_constructor(void **resource, void *params,
apr_pool_t *pool)
{
apr_pool_t *ctx;
+ apr_pool_t *scpool;
proxy_conn_rec *conn;
proxy_worker *worker = (proxy_worker *)params;
@@ -1607,9 +1780,20 @@ static apr_status_t connection_constructor(void **resource, void *params,
* when disconnecting from backend.
*/
apr_pool_create(&ctx, pool);
- conn = apr_pcalloc(pool, sizeof(proxy_conn_rec));
+ apr_pool_tag(ctx, "proxy_conn_pool");
+ /*
+ * Create another subpool that manages the data for the
+ * socket and the connection member of the proxy_conn_rec struct as we
+ * destroy this data more frequently than other data in the proxy_conn_rec
+ * struct like hostname and addr (at least in the case where we have
+ * keepalive connections that timed out).
+ */
+ apr_pool_create(&scpool, ctx);
+ apr_pool_tag(scpool, "proxy_conn_scpool");
+ conn = apr_pcalloc(ctx, sizeof(proxy_conn_rec));
conn->pool = ctx;
+ conn->scpool = scpool;
conn->worker = worker;
#if APR_HAS_THREADS
conn->inreslist = 1;
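
The resulting pool layout, sketched below as a comment, is what lets socket_cleanup() above discard a dead keepalive socket (and the backend conn_rec plus bucket allocator built on it) without throwing away the hostname and address data that still lives in conn->pool.

    /*
     *   conn->pool   ("proxy_conn_pool")        - proxy_conn_rec, hostname, addr
     *     \__ conn->scpool ("proxy_conn_scpool") - socket, backend conn_rec,
     *                                              its bucket allocator
     *
     * apr_pool_clear(conn->scpool) in socket_cleanup() releases only the
     * lower level; the resolved address information survives for reuse.
     */
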
@@ -1725,8 +1909,13 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
if (!worker->retry_set) {
worker->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
}
- /* By default address is reusable */
- worker->is_address_reusable = 1;
+ /* By default address is reusable unless DisableReuse is set */
+ if (worker->disablereuse) {
+ worker->is_address_reusable = 0;
+ }
+ else {
+ worker->is_address_reusable = 1;
+ }
#if APR_HAS_THREADS
ap_mpm_query(AP_MPMQ_MAX_THREADS, &mpm_threads);
@@ -1873,11 +2062,6 @@ PROXY_DECLARE(int) ap_proxy_release_connection(const char *proxy_function,
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
"proxy: %s: has released connection for (%s)",
proxy_function, conn->worker->hostname);
- /* If there is a connection kill it's cleanup */
- if (conn->connection) {
- apr_pool_cleanup_kill(conn->connection->pool, conn, connection_cleanup);
- conn->connection = NULL;
- }
connection_cleanup(conn);
return OK;
@@ -1899,6 +2083,8 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
apr_status_t err = APR_SUCCESS;
apr_status_t uerr = APR_SUCCESS;
+ conn->r = r;
+
/*
* Break up the URL to determine the host to connect to
*/
@@ -1938,7 +2124,8 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
*
* TODO: Handle this much better...
*/
- if (!conn->hostname || !worker->is_address_reusable ||
+ if (!conn->hostname || !worker->is_address_reusable ||
+ worker->disablereuse ||
(r->connection->keepalives &&
(r->proxyreq == PROXYREQ_PROXY || r->proxyreq == PROXYREQ_REVERSE) &&
(strcasecmp(conn->hostname, uri->hostname) != 0) ) ) {
@@ -1950,14 +2137,7 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
conn->hostname = apr_pstrdup(conn->pool, uri->hostname);
conn->port = uri->port;
}
- if (conn->sock) {
- apr_socket_close(conn->sock);
- conn->sock = NULL;
- }
- if (conn->connection) {
- apr_pool_cleanup_kill(conn->connection->pool, conn, connection_cleanup);
- conn->connection = NULL;
- }
+ socket_cleanup(conn);
err = apr_sockaddr_info_get(&(conn->addr),
conn->hostname, APR_UNSPEC,
conn->port, 0,
@@ -2101,14 +2281,8 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
(proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
if (conn->sock) {
- /*
- * This increases the connection pool size
- * but the number of dropped connections is
- * relatively small compared to connection lifetime
- */
if (!(connected = is_socket_connected(conn->sock))) {
- apr_socket_close(conn->sock);
- conn->sock = NULL;
+ socket_cleanup(conn);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
"proxy: %s: backend socket is disconnected.",
proxy_function);
@@ -2117,7 +2291,7 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
while (backend_addr && !connected) {
if ((rv = apr_socket_create(&newsock, backend_addr->family,
SOCK_STREAM, APR_PROTO_TCP,
- conn->pool)) != APR_SUCCESS) {
+ conn->scpool)) != APR_SUCCESS) {
loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
ap_log_error(APLOG_MARK, loglevel, rv, s,
"proxy: %s: error creating fam %d socket for target %s",
@@ -2132,6 +2306,7 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
backend_addr = backend_addr->next;
continue;
}
+ conn->connection = NULL;
#if !defined(TPF) && !defined(BEOS)
if (worker->recv_buffer_size > 0 &&
@@ -2221,13 +2396,25 @@ PROXY_DECLARE(int) ap_proxy_connection_create(const char *proxy_function,
apr_sockaddr_t *backend_addr = conn->addr;
int rc;
apr_interval_time_t current_timeout;
+ apr_bucket_alloc_t *bucket_alloc;
+
+ if (conn->connection) {
+ return OK;
+ }
/*
+ * We need to flush the buckets before we return the connection to the
+ * connection pool. See comment in connection_cleanup for why this is
+ * needed.
+ */
+ conn->need_flush = 1;
+ bucket_alloc = apr_bucket_alloc_create(conn->scpool);
+ /*
* The socket is now open, create a new backend server connection
*/
- conn->connection = ap_run_create_connection(c->pool, s, conn->sock,
- c->id, c->sbh,
- c->bucket_alloc);
+ conn->connection = ap_run_create_connection(conn->scpool, s, conn->sock,
+ 0, NULL,
+ bucket_alloc);
if (!conn->connection) {
/*
@@ -2239,17 +2426,9 @@ PROXY_DECLARE(int) ap_proxy_connection_create(const char *proxy_function,
"new connection to %pI (%s)", proxy_function,
backend_addr, conn->hostname);
/* XXX: Will be closed when proxy_conn is closed */
- apr_socket_close(conn->sock);
- conn->sock = NULL;
+ socket_cleanup(conn);
return HTTP_INTERNAL_SERVER_ERROR;
}
- /*
- * register the connection cleanup to client connection
- * so that the connection can be closed or reused
- */
- apr_pool_cleanup_register(c->pool, (void *)conn,
- connection_cleanup,
- apr_pool_cleanup_null);
/* For ssl connection to backend */
if (conn->is_ssl) {
diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c
index 01d5b43b..ff690167 100644
--- a/modules/ssl/mod_ssl.c
+++ b/modules/ssl/mod_ssl.c
@@ -227,17 +227,18 @@ static apr_status_t ssl_cleanup_pre_config(void *data)
#if HAVE_ENGINE_LOAD_BUILTIN_ENGINES
ENGINE_cleanup();
#endif
-#ifdef HAVE_OPENSSL
-#if OPENSSL_VERSION_NUMBER >= 0x00907001
- CRYPTO_cleanup_all_ex_data();
-#endif
-#endif
ERR_remove_state(0);
/* Don't call ERR_free_strings here; ERR_load_*_strings only
* actually load the error strings once per process due to static
* variable abuse in OpenSSL. */
+ /* Also don't call CRYPTO_cleanup_all_ex_data here; any registered
+ * ex_data indices may have been cached in static variables in
+ * OpenSSL; removing them may cause havoc. Notably, with OpenSSL
+ * versions >= 0.9.8f, COMP_CTX cleanups would not be run, which
+ * could result in a per-connection memory leak (!). */
+
/*
* TODO: determine somewhere we can safely shove out diagnostics
* (when enabled) at this late stage in the game: