Diffstat (limited to 'modules')
-rw-r--r--  modules/aaa/mod_authnz_ldap.c        38
-rw-r--r--  modules/cache/cache_storage.c        59
-rw-r--r--  modules/cache/mod_cache.c            39
-rw-r--r--  modules/cache/mod_cache.h             6
-rw-r--r--  modules/cache/mod_mem_cache.c        24
-rw-r--r--  modules/generators/mod_autoindex.c    6
-rw-r--r--  modules/http/http_protocol.c         23
-rw-r--r--  modules/ldap/util_ldap.c             10
-rw-r--r--  modules/proxy/NWGNUproxyscgi        263
-rw-r--r--  modules/proxy/config.m4               3
-rw-r--r--  modules/proxy/mod_proxy_ftp.c        63
-rw-r--r--  modules/proxy/mod_proxy_scgi.c      627
-rw-r--r--  modules/ssl/ssl_engine_init.c         2
-rw-r--r--  modules/ssl/ssl_engine_pphrase.c      3
-rw-r--r--  modules/ssl/ssl_engine_vars.c         6
15 files changed, 1102 insertions, 70 deletions
diff --git a/modules/aaa/mod_authnz_ldap.c b/modules/aaa/mod_authnz_ldap.c
index db13f1fd..9b61022a 100644
--- a/modules/aaa/mod_authnz_ldap.c
+++ b/modules/aaa/mod_authnz_ldap.c
@@ -527,6 +527,29 @@ static int authz_ldap_check_user_access(request_rec *r)
return DECLINED;
}
+ /* pre-scan for ldap-* requirements so we can get out of the way early */
+ for(x=0; x < reqs_arr->nelts; x++) {
+ if (! (reqs[x].method_mask & (AP_METHOD_BIT << m))) {
+ continue;
+ }
+
+ t = reqs[x].requirement;
+ w = ap_getword_white(r->pool, &t);
+
+ if (strncmp(w, "ldap-",5) == 0) {
+ required_ldap = 1;
+ break;
+ }
+ }
+
+ if (!required_ldap) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "[%" APR_PID_T_FMT "] auth_ldap authorise: declining to authorise (no ldap requirements)", getpid());
+ return DECLINED;
+ }
+
+
+
if (sec->host) {
ldc = util_ldap_connection_find(r, sec->host, sec->port,
sec->binddn, sec->bindpw, sec->deref,
@@ -559,12 +582,6 @@ static int authz_ldap_check_user_access(request_rec *r)
#endif
}
- if (!reqs_arr) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
- "[%" APR_PID_T_FMT "] auth_ldap authorise: no requirements array", getpid());
- return sec->auth_authoritative? HTTP_UNAUTHORIZED : DECLINED;
- }
-
/*
* If we have been authenticated by some other module than mod_auth_ldap,
* the req structure needed for authorization needs to be created
@@ -615,7 +632,6 @@ static int authz_ldap_check_user_access(request_rec *r)
w = ap_getword_white(r->pool, &t);
if (strcmp(w, "ldap-user") == 0) {
- required_ldap = 1;
if (req->dn == NULL || strlen(req->dn) == 0) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
"[%" APR_PID_T_FMT "] auth_ldap authorise: "
@@ -665,7 +681,6 @@ static int authz_ldap_check_user_access(request_rec *r)
}
}
else if (strcmp(w, "ldap-dn") == 0) {
- required_ldap = 1;
if (req->dn == NULL || strlen(req->dn) == 0) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
"[%" APR_PID_T_FMT "] auth_ldap authorise: "
@@ -693,7 +708,6 @@ static int authz_ldap_check_user_access(request_rec *r)
else if (strcmp(w, "ldap-group") == 0) {
struct mod_auth_ldap_groupattr_entry_t *ent = (struct mod_auth_ldap_groupattr_entry_t *) sec->groupattr->elts;
int i;
- required_ldap = 1;
if (sec->group_attrib_is_dn) {
if (req->dn == NULL || strlen(req->dn) == 0) {
@@ -743,7 +757,6 @@ static int authz_ldap_check_user_access(request_rec *r)
}
}
else if (strcmp(w, "ldap-attribute") == 0) {
- required_ldap = 1;
if (req->dn == NULL || strlen(req->dn) == 0) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
"[%" APR_PID_T_FMT "] auth_ldap authorise: "
@@ -779,7 +792,6 @@ static int authz_ldap_check_user_access(request_rec *r)
}
}
else if (strcmp(w, "ldap-filter") == 0) {
- required_ldap = 1;
if (req->dn == NULL || strlen(req->dn) == 0) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
"[%" APR_PID_T_FMT "] auth_ldap authorise: "
@@ -843,9 +855,9 @@ static int authz_ldap_check_user_access(request_rec *r)
return OK;
}
- if (!required_ldap || !sec->auth_authoritative) {
+ if (!sec->auth_authoritative) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
- "[%" APR_PID_T_FMT "] auth_ldap authorise: declining to authorise", getpid());
+ "[%" APR_PID_T_FMT "] auth_ldap authorise: declining to authorise (not authoritative)", getpid());
return DECLINED;
}
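
The hunks above move the required_ldap decision into a pre-scan of the requirements array, so the module can decline early, before any LDAP connection is opened, when no ldap-* requirement applies to the request. As a self-contained illustration of the matching rule the pre-scan relies on (the first whitespace-delimited word of each Require line is tested for the "ldap-" prefix), here is a small sketch in plain C; the sample Require lines and the helper name is_ldap_requirement are invented for the example and are not part of the patch:

#include <stdio.h>
#include <string.h>

/* Return 1 if the first word of a Require line starts with "ldap-". */
static int is_ldap_requirement(const char *req)
{
    size_t wordlen = strcspn(req, " \t");   /* length of the first word */
    return wordlen >= 5 && strncmp(req, "ldap-", 5) == 0;
}

int main(void)
{
    /* Hypothetical Require lines; only the last two are LDAP requirements. */
    const char *reqs[] = {
        "valid-user",
        "ldap-group cn=admins,ou=groups,dc=example,dc=com",
        "ldap-attribute employeeType=active"
    };
    int i, required_ldap = 0;

    for (i = 0; i < 3; i++) {
        if (is_ldap_requirement(reqs[i])) {
            required_ldap = 1;
            break;
        }
    }
    printf("required_ldap = %d\n", required_ldap);   /* prints 1 */
    return 0;
}
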
diff --git a/modules/cache/cache_storage.c b/modules/cache/cache_storage.c
index 7b99f3ed..1af26d77 100644
--- a/modules/cache/cache_storage.c
+++ b/modules/cache/cache_storage.c
@@ -357,6 +357,7 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
char *port_str, *hn, *lcs;
const char *hostname, *scheme;
int i;
+ char *path, *querystring;
cache = (cache_request_rec *) ap_get_module_config(r->request_config,
&cache_module);
@@ -474,14 +475,65 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
port_str = apr_psprintf(p, ":%u", ap_get_server_port(r));
}
+ /*
+ * Check if we need to ignore session identifiers in the URL and do so
+ * if needed.
+ */
+ path = r->parsed_uri.path;
+ querystring = r->parsed_uri.query;
+ if (conf->ignore_session_id->nelts) {
+ int i;
+ char **identifier;
+
+ identifier = (char **)conf->ignore_session_id->elts;
+ for (i = 0; i < conf->ignore_session_id->nelts; i++, identifier++) {
+ int len;
+ char *param;
+
+ len = strlen(*identifier);
+ /*
+ * Check that we have a parameter separator in the last segment
+ * of the path and that the parameter matches our identifier
+ */
+ if ((param = strrchr(path, ';'))
+ && !strncmp(param + 1, *identifier, len)
+ && (*(param + len + 1) == '=')
+ && !strchr(param + len + 2, '/')) {
+ path = apr_pstrndup(p, path, param - path);
+ break;
+ }
+ /*
+ * Check if the identifier is in the querystring and cut it out.
+ */
+ if (querystring
+ && (param = strstr(querystring, *identifier))
+ && (*(param + len) == '=')
+ ) {
+ char *amp;
+
+ if (querystring != param) {
+ querystring = apr_pstrndup(p, querystring,
+ param - querystring);
+ }
+ else {
+ querystring = "";
+ }
+ if ((amp = strchr(param + len + 1, '&'))) {
+ querystring = apr_pstrcat(p, querystring, amp + 1, NULL);
+ }
+ break;
+ }
+ }
+ }
+
/* Key format is a URI, optionally without the query-string */
if (conf->ignorequerystring) {
*key = apr_pstrcat(p, scheme, "://", hostname, port_str,
- r->parsed_uri.path, "?", NULL);
+ path, "?", NULL);
}
else {
*key = apr_pstrcat(p, scheme, "://", hostname, port_str,
- r->parsed_uri.path, "?", r->parsed_uri.query, NULL);
+ path, "?", querystring, NULL);
}
/*
@@ -493,6 +545,9 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
* handler during following requests.
*/
cache->key = apr_pstrdup(r->pool, *key);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
+ "cache: Key for entity %s?%s is %s", r->parsed_uri.path,
+ r->parsed_uri.query, *key);
return APR_SUCCESS;
}
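
The new block in cache_generate_key_default() removes a configured session identifier from either the last path segment (the ";jsessionid=..." form) or the query string before the cache key is assembled, so that otherwise identical URLs map to one cached entity. The standalone sketch below performs the same two rewrites with plain C string handling instead of APR pools; the identifier name, the URL, and the helper strip_session_id are made up for illustration and are not part of the patch:

#include <stdio.h>
#include <string.h>

/* Strip ";<id>=value" from the last path segment and "<id>=value" from the
 * query string, roughly mirroring the key normalisation in the hunk above. */
static void strip_session_id(const char *id, char *path, char *query)
{
    size_t len = strlen(id);
    char *param, *amp;

    /* Path form: must sit in the last segment, e.g. /app/page;jsessionid=abc123 */
    if ((param = strrchr(path, ';'))
        && !strncmp(param + 1, id, len)
        && param[len + 1] == '='
        && !strchr(param + len + 2, '/')) {
        *param = '\0';
    }

    /* Query form: foo=1&jsessionid=abc123&bar=2 becomes foo=1&bar=2 */
    if ((param = strstr(query, id)) && param[len] == '=') {
        amp = strchr(param + len + 1, '&');
        if (amp) {
            memmove(param, amp + 1, strlen(amp + 1) + 1);
        }
        else if (param > query) {
            param[-1] = '\0';   /* drop a trailing "&<id>=value" */
        }
        else {
            *param = '\0';      /* identifier was the whole query string */
        }
    }
}

int main(void)
{
    char path[]  = "/app/page;jsessionid=abc123";
    char query[] = "foo=1&jsessionid=abc123&bar=2";

    strip_session_id("jsessionid", path, query);
    printf("%s?%s\n", path, query);   /* prints /app/page?foo=1&bar=2 */
    return 0;
}
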
diff --git a/modules/cache/mod_cache.c b/modules/cache/mod_cache.c
index a35b2abc..49a37423 100644
--- a/modules/cache/mod_cache.c
+++ b/modules/cache/mod_cache.c
@@ -950,6 +950,9 @@ static void * create_cache_config(apr_pool_t *p, server_rec *s)
/* flag indicating that query-string should be ignored when caching */
ps->ignorequerystring = 0;
ps->ignorequerystring_set = 0;
+ /* array of identifiers that should not be used for key calculation */
+ ps->ignore_session_id = apr_array_make(p, 10, sizeof(char *));
+ ps->ignore_session_id_set = CACHE_IGNORE_SESSION_ID_UNSET;
return ps;
}
@@ -999,6 +1002,10 @@ static void * merge_cache_config(apr_pool_t *p, void *basev, void *overridesv)
(overrides->ignorequerystring_set == 0)
? base->ignorequerystring
: overrides->ignorequerystring;
+ ps->ignore_session_id =
+ (overrides->ignore_session_id_set == CACHE_IGNORE_SESSION_ID_UNSET)
+ ? base->ignore_session_id
+ : overrides->ignore_session_id;
return ps;
}
static const char *set_cache_ignore_no_last_mod(cmd_parms *parms, void *dummy,
@@ -1082,6 +1089,34 @@ static const char *add_ignore_header(cmd_parms *parms, void *dummy,
return NULL;
}
+static const char *add_ignore_session_id(cmd_parms *parms, void *dummy,
+ const char *identifier)
+{
+ cache_server_conf *conf;
+ char **new;
+
+ conf =
+ (cache_server_conf *)ap_get_module_config(parms->server->module_config,
+ &cache_module);
+ if (!strncasecmp(identifier, "None", 4)) {
+ /* if identifier None is listed clear array */
+ conf->ignore_session_id->nelts = 0;
+ }
+ else {
+ if ((conf->ignore_session_id_set == CACHE_IGNORE_SESSION_ID_UNSET) ||
+ (conf->ignore_session_id->nelts)) {
+ /*
+ * Only add identifier if no "None" has been found in identifier
+ * list so far.
+ */
+ new = (char **)apr_array_push(conf->ignore_session_id);
+ (*new) = (char *)identifier;
+ }
+ }
+ conf->ignore_session_id_set = CACHE_IGNORE_SESSION_ID_SET;
+ return NULL;
+}
+
static const char *add_cache_enable(cmd_parms *parms, void *dummy,
const char *type,
const char *url)
@@ -1242,6 +1277,10 @@ static const command_rec cache_cmds[] =
AP_INIT_FLAG("CacheIgnoreQueryString", set_cache_ignore_querystring,
NULL, RSRC_CONF,
"Ignore query-string when caching"),
+ AP_INIT_ITERATE("CacheIgnoreURLSessionIdentifiers", add_ignore_session_id,
+ NULL, RSRC_CONF, "A space separated list of session "
+ "identifiers that should be ignored for creating the key "
+ "of the cached entity."),
AP_INIT_TAKE1("CacheLastModifiedFactor", set_cache_factor, NULL, RSRC_CONF,
"The factor used to estimate Expires date from "
"LastModified date"),
diff --git a/modules/cache/mod_cache.h b/modules/cache/mod_cache.h
index 4fe8dfc4..72653ae3 100644
--- a/modules/cache/mod_cache.h
+++ b/modules/cache/mod_cache.h
@@ -153,6 +153,12 @@ typedef struct {
/** ignore query-string when caching */
int ignorequerystring;
int ignorequerystring_set;
+ /** store the identifiers that should not be used for key calculation */
+ apr_array_header_t *ignore_session_id;
+ /* flag if CacheIgnoreURLSessionIdentifiers has been set */
+ #define CACHE_IGNORE_SESSION_ID_SET 1
+ #define CACHE_IGNORE_SESSION_ID_UNSET 0
+ int ignore_session_id_set;
} cache_server_conf;
/* cache info information */
diff --git a/modules/cache/mod_mem_cache.c b/modules/cache/mod_mem_cache.c
index 7bdaeac9..48bd785c 100644
--- a/modules/cache/mod_mem_cache.c
+++ b/modules/cache/mod_mem_cache.c
@@ -60,6 +60,7 @@ typedef enum {
typedef struct mem_cache_object {
apr_pool_t *pool;
+ apr_thread_mutex_t *lock; /* pools aren't thread-safe; use this lock when accessing this pool */
cache_type_e type;
apr_table_t *header_out;
apr_table_t *req_hdrs; /* for Vary negotiation */
@@ -91,6 +92,8 @@ typedef struct {
} mem_cache_conf;
static mem_cache_conf *sconf;
+static int threaded_mpm;
+
#define DEFAULT_MAX_CACHE_SIZE 100*1024
#define DEFAULT_MIN_CACHE_OBJECT_SIZE 1
#define DEFAULT_MAX_CACHE_OBJECT_SIZE 10000
@@ -216,9 +219,8 @@ static void cleanup_cache_object(cache_object_t *obj)
close(mobj->fd);
#endif
}
+ apr_pool_destroy(mobj->pool);
}
-
- apr_pool_destroy(mobj->pool);
}
static apr_status_t decrement_refcount(void *arg)
{
@@ -359,6 +361,10 @@ static int create_entity(cache_handle_t *h, cache_type_e type_e,
mobj = apr_pcalloc(pool, sizeof(*mobj));
mobj->pool = pool;
+ if (threaded_mpm) {
+ apr_thread_mutex_create(&mobj->lock, APR_THREAD_MUTEX_DEFAULT, pool);
+ }
+
/* Finish initing the cache object */
apr_atomic_set32(&obj->refcount, 1);
mobj->total_refs = 1;
@@ -601,7 +607,13 @@ static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info
* - The original response headers (for returning with a cached response)
* - The body of the message
*/
+ if (mobj->lock) {
+ apr_thread_mutex_lock(mobj->lock);
+ }
mobj->req_hdrs = deep_table_copy(mobj->pool, r->headers_in);
+ if (mobj->lock) {
+ apr_thread_mutex_unlock(mobj->lock);
+ }
/* Precompute how much storage we need to hold the headers */
headers_out = apr_table_overlay(r->pool, r->headers_out,
@@ -622,7 +634,13 @@ static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info
r->content_encoding);
}
+ if (mobj->lock) {
+ apr_thread_mutex_lock(mobj->lock);
+ }
mobj->header_out = deep_table_copy(mobj->pool, headers_out);
+ if (mobj->lock) {
+ apr_thread_mutex_unlock(mobj->lock);
+ }
/* Init the info struct */
obj->info.status = info->status;
@@ -815,8 +833,6 @@ static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_bri
static int mem_cache_post_config(apr_pool_t *p, apr_pool_t *plog,
apr_pool_t *ptemp, server_rec *s)
{
- int threaded_mpm;
-
/* Sanity check the cache configuration */
if (sconf->min_cache_object_size >= sconf->max_cache_object_size) {
ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
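
The mod_mem_cache hunks above add a per-object mutex, created only under a threaded MPM, because store_headers() allocates from the shared mobj->pool and APR pools are not safe for concurrent allocation. The following minimal sketch shows that lock-around-pool pattern with the same APR primitives; it is illustrative only and does not reproduce the module's data structures:

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_strings.h>
#include <apr_thread_mutex.h>

int main(void)
{
    apr_pool_t *pool;
    apr_thread_mutex_t *lock;
    char *copy;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* One mutex guarding one shared pool; only needed with a threaded MPM. */
    apr_thread_mutex_create(&lock, APR_THREAD_MUTEX_DEFAULT, pool);

    /* Every allocation from the shared pool happens under the lock. */
    apr_thread_mutex_lock(lock);
    copy = apr_pstrdup(pool, "header value copied into the shared pool");
    apr_thread_mutex_unlock(lock);

    (void)copy;
    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}
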
diff --git a/modules/generators/mod_autoindex.c b/modules/generators/mod_autoindex.c
index 1c38cf28..ef17f666 100644
--- a/modules/generators/mod_autoindex.c
+++ b/modules/generators/mod_autoindex.c
@@ -1762,9 +1762,9 @@ static void output_directories(struct ent **ar, int n,
desc_width), NULL);
}
}
- }
- else {
- ap_rputs("</td><td>&nbsp;", r);
+ else {
+ ap_rputs("</td><td>&nbsp;", r);
+ }
}
ap_rputs("</td></tr>\n", r);
}
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
index 046e98bc..fdd7d757 100644
--- a/modules/http/http_protocol.c
+++ b/modules/http/http_protocol.c
@@ -1005,16 +1005,19 @@ static const char *get_canned_error_string(int status,
"request-header field overlap the current extent\n"
"of the selected resource.</p>\n");
case HTTP_EXPECTATION_FAILED:
- return(apr_pstrcat(p,
- "<p>The expectation given in the Expect "
- "request-header"
- "\nfield could not be met by this server.</p>\n"
- "<p>The client sent<pre>\n Expect: ",
- ap_escape_html(r->pool, apr_table_get(r->headers_in, "Expect")),
- "\n</pre>\n"
- "but we only allow the 100-continue "
- "expectation.</p>\n",
- NULL));
+ s1 = apr_table_get(r->headers_in, "Expect");
+ if (s1)
+ s1 = apr_pstrcat(p,
+ "<p>The expectation given in the Expect request-header\n"
+ "field could not be met by this server.\n"
+ "The client sent<pre>\n Expect: ",
+ ap_escape_html(r->pool, s1), "\n</pre>\n",
+ NULL);
+ else
+ s1 = "<p>No expectation was seen, the Expect request-header \n"
+ "field was not presented by the client.\n";
+ return add_optional_notes(r, s1, "error-notes", "</p>"
+ "<p>Only the 100-continue expectation is supported.</p>\n");
case HTTP_UNPROCESSABLE_ENTITY:
return("<p>The server understands the media type of the\n"
"request entity, but was unable to process the\n"
diff --git a/modules/ldap/util_ldap.c b/modules/ldap/util_ldap.c
index c9cc6e98..5b432696 100644
--- a/modules/ldap/util_ldap.c
+++ b/modules/ldap/util_ldap.c
@@ -2075,9 +2075,9 @@ static const command_rec util_ldap_cmds[] = {
AP_INIT_TAKE1("LDAPCacheEntries", util_ldap_set_cache_entries,
NULL, RSRC_CONF,
"Set the maximum number of entries that are possible in the "
- "LDAP search cache. Use 0 for no limit. "
- "-1 disables the cache. (default: 1024)"),
-
+ "LDAP search cache. Use 0 or -1 to disable the search cache "
+ "(default: 1024)"),
+
AP_INIT_TAKE1("LDAPCacheTTL", util_ldap_set_cache_ttl,
NULL, RSRC_CONF,
"Set the maximum time (in seconds) that an item can be "
@@ -2087,8 +2087,8 @@ static const command_rec util_ldap_cmds[] = {
AP_INIT_TAKE1("LDAPOpCacheEntries", util_ldap_set_opcache_entries,
NULL, RSRC_CONF,
"Set the maximum number of entries that are possible "
- "in the LDAP compare cache. Use 0 for no limit. "
- "Use -1 to disable the cache. (default: 1024)"),
+ "in the LDAP compare cache. Use 0 or -1 to disable the compare cache "
+ "(default: 1024)"),
AP_INIT_TAKE1("LDAPOpCacheTTL", util_ldap_set_opcache_ttl,
NULL, RSRC_CONF,
diff --git a/modules/proxy/NWGNUproxyscgi b/modules/proxy/NWGNUproxyscgi
new file mode 100644
index 00000000..40d8bb24
--- /dev/null
+++ b/modules/proxy/NWGNUproxyscgi
@@ -0,0 +1,263 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)\build\NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(AP_WORK)/include \
+ $(NWOS) \
+ $(AP_WORK)/modules/http \
+ $(AP_WORK)/modules/arch/netware \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(APR) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxyscg
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Proxy SCGI Sub-Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Proxy SCGI Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)\build\NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM = _LibCPrelude
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM = _LibCPostlude
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS = AUTOUNLOAD, PSEUDOPREEMPTION
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/proxyscgi.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_proxy_scgi.o \
+ $(OBJDIR)/proxy_util.o \
+ $(OBJDIR)/libprews.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ libcpre.o \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ proxy \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @$(APR)/aprlib.imp \
+ @$(NWOS)/httpd.imp \
+ @$(OBJDIR)/mod_proxy.imp \
+ @libc.imp \
+ $(EOLIST)
+
+# Don't link with Winsock if standard sockets are being used
+ifndef USE_STDSOCKETS
+FILES_nlm_Ximports += @ws2nlm.imp \
+ $(EOLIST)
+endif
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ proxy_scgi_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)\build\NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+vpath %.c ../arch/netware
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(AP_WORK)\build\NWGNUtail.inc
+
+
diff --git a/modules/proxy/config.m4 b/modules/proxy/config.m4
index da0a0b31..062bd15f 100644
--- a/modules/proxy/config.m4
+++ b/modules/proxy/config.m4
@@ -16,6 +16,7 @@ APACHE_MODULE(proxy, Apache proxy module, $proxy_objs, , $proxy_mods_enable)
proxy_connect_objs="mod_proxy_connect.lo"
proxy_ftp_objs="mod_proxy_ftp.lo"
proxy_http_objs="mod_proxy_http.lo"
+proxy_scgi_objs="mod_proxy_scgi.lo"
proxy_ajp_objs="mod_proxy_ajp.lo ajp_header.lo ajp_link.lo ajp_msg.lo ajp_utils.lo"
proxy_balancer_objs="mod_proxy_balancer.lo"
@@ -26,6 +27,7 @@ case "$host" in
proxy_connect_objs="$proxy_connect_objs mod_proxy.la"
proxy_ftp_objs="$proxy_ftp_objs mod_proxy.la"
proxy_http_objs="$proxy_http_objs mod_proxy.la"
+ proxy_scgi_objs="$proxy_scgi_objs mod_proxy.la"
proxy_ajp_objs="$proxy_ajp_objs mod_proxy.la"
proxy_balancer_objs="$proxy_balancer_objs mod_proxy.la"
;;
@@ -34,6 +36,7 @@ esac
APACHE_MODULE(proxy_connect, Apache proxy CONNECT module, $proxy_connect_objs, , $proxy_mods_enable)
APACHE_MODULE(proxy_ftp, Apache proxy FTP module, $proxy_ftp_objs, , $proxy_mods_enable)
APACHE_MODULE(proxy_http, Apache proxy HTTP module, $proxy_http_objs, , $proxy_mods_enable)
+APACHE_MODULE(proxy_scgi, Apache proxy SCGI module, $proxy_scgi_objs, , $proxy_mods_enable)
APACHE_MODULE(proxy_ajp, Apache proxy AJP module, $proxy_ajp_objs, , $proxy_mods_enable)
APACHE_MODULE(proxy_balancer, Apache proxy BALANCER module, $proxy_balancer_objs, , $proxy_mods_enable)
diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
index 639f9f8d..924ac310 100644
--- a/modules/proxy/mod_proxy_ftp.c
+++ b/modules/proxy/mod_proxy_ftp.c
@@ -604,6 +604,31 @@ static apr_status_t proxy_send_dir_filter(ap_filter_t *f,
return APR_SUCCESS;
}
+/* Parse EPSV reply and return port, or zero on error. */
+static apr_port_t parse_epsv_reply(const char *reply)
+{
+ const char *p;
+ char *ep;
+ long port;
+
+ /* Reply syntax per RFC 2428: "229 blah blah (|||port|)" where '|'
+ * can be any character in ASCII from 33-126, obscurely. Verify
+ * the syntax. */
+ p = ap_strchr_c(reply, '(');
+ if (p == NULL || !p[1] || p[1] != p[2] || p[1] != p[3]
+ || p[4] == p[1]) {
+ return 0;
+ }
+
+ errno = 0;
+ port = strtol(p + 4, &ep, 10);
+ if (errno || port < 1 || port > 65535 || ep[0] != p[1] || ep[1] != ')') {
+ return 0;
+ }
+
+ return (apr_port_t)port;
+}
+
/*
* Generic "send FTP command to server" routine, using the control socket.
* Returns the FTP returncode (3 digit code)
@@ -887,6 +912,11 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
if ((password = apr_table_get(r->headers_in, "Authorization")) != NULL
&& strcasecmp(ap_getword(r->pool, &password, ' '), "Basic") == 0
&& (password = ap_pbase64decode(r->pool, password))[0] != ':') {
+ /* Check the decoded string for special characters. */
+ if (!ftp_check_string(password)) {
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ "user credentials contained invalid character");
+ }
/*
* Note that this allocation has to be made from r->connection->pool
* because it has the lifetime of the connection. The other
@@ -1210,26 +1240,11 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
return ftp_proxyerror(r, backend, HTTP_BAD_GATEWAY, ftpmessage);
}
else if (rc == 229) {
- char *pstr;
- char *tok_cntx;
+ /* Parse the port out of the EPSV reply. */
+ data_port = parse_epsv_reply(ftpmessage);
- pstr = ftpmessage;
- pstr = apr_strtok(pstr, " ", &tok_cntx); /* separate result code */
- if (pstr != NULL) {
- if (*(pstr + strlen(pstr) + 1) == '=') {
- pstr += strlen(pstr) + 2;
- }
- else {
- pstr = apr_strtok(NULL, "(", &tok_cntx); /* separate address &
- * port params */
- if (pstr != NULL)
- pstr = apr_strtok(NULL, ")", &tok_cntx);
- }
- }
-
- if (pstr) {
+ if (data_port) {
apr_sockaddr_t *epsv_addr;
- data_port = atoi(pstr + 3);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: FTP: EPSV contacting remote host on port %d",
@@ -1272,10 +1287,6 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
connect = 1;
}
}
- else {
- /* and try the regular way */
- apr_socket_close(data_sock);
- }
}
}
@@ -1364,10 +1375,6 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
connect = 1;
}
}
- else {
- /* and try the regular way */
- apr_socket_close(data_sock);
- }
}
}
/*bypass:*/
@@ -1851,7 +1858,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
* for a slow client to eat these bytes
*/
ap_flush_conn(data);
- apr_socket_close(data_sock);
+ if (data_sock) {
+ apr_socket_close(data_sock);
+ }
data_sock = NULL;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: FTP: data connection closed");
diff --git a/modules/proxy/mod_proxy_scgi.c b/modules/proxy/mod_proxy_scgi.c
new file mode 100644
index 00000000..e152c277
--- /dev/null
+++ b/modules/proxy/mod_proxy_scgi.c
@@ -0,0 +1,627 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * mod_proxy_scgi.c
+ * Proxy backend module for the SCGI protocol
+ * (http://python.ca/scgi/protocol.txt)
+ *
+ * André Malo (nd/perlig.de), August 2007
+ */
+
+#define APR_WANT_MEMFUNC
+#define APR_WANT_STRFUNC
+#include "apr_strings.h"
+#include "apr_hooks.h"
+#include "apr_optional_hooks.h"
+#include "apr_buckets.h"
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_script.h"
+
+#include "mod_proxy.h"
+
+
+#define SCHEME "scgi"
+#define PROXY_FUNCTION "SCGI"
+#define SCGI_MAGIC "SCGI"
+#define SCGI_PROTOCOL_VERSION "1"
+#define SCGI_DEFAULT_PORT (4000)
+
+/* just protect from typos */
+#define CONTENT_LENGTH "CONTENT_LENGTH"
+#define GATEWAY_INTERFACE "GATEWAY_INTERFACE"
+
+module AP_MODULE_DECLARE_DATA proxy_scgi_module;
+
+
+typedef enum {
+ scgi_internal_redirect,
+ scgi_sendfile
+} scgi_request_type;
+
+typedef struct {
+ const char *location; /* target URL */
+ scgi_request_type type; /* type of request */
+} scgi_request_config;
+
+const char *scgi_sendfile_off = "off";
+const char *scgi_sendfile_on = "X-Sendfile";
+
+typedef struct {
+ const char *sendfile;
+ int internal_redirect;
+} scgi_config;
+
+
+/*
+ * We create our own bucket type, which is actually derived (c&p) from the
+ * socket bucket.
+ * Maybe some time this should be made more abstract (like passing an
+ * interception function to read or something) and go into the ap_ or
+ * even apr_ namespace.
+ */
+
+typedef struct {
+ apr_socket_t *sock;
+ apr_off_t *counter;
+} socket_ex_data;
+
+static apr_bucket *bucket_socket_ex_create(socket_ex_data *data,
+ apr_bucket_alloc_t *list);
+
+
+static apr_status_t bucket_socket_ex_read(apr_bucket *a, const char **str,
+ apr_size_t *len,
+ apr_read_type_e block)
+{
+ socket_ex_data *data = a->data;
+ apr_socket_t *p = data->sock;
+ char *buf;
+ apr_status_t rv;
+ apr_interval_time_t timeout;
+
+ if (block == APR_NONBLOCK_READ) {
+ apr_socket_timeout_get(p, &timeout);
+ apr_socket_timeout_set(p, 0);
+ }
+
+ *str = NULL;
+ *len = APR_BUCKET_BUFF_SIZE;
+ buf = apr_bucket_alloc(*len, a->list);
+
+ rv = apr_socket_recv(p, buf, len);
+
+ if (block == APR_NONBLOCK_READ) {
+ apr_socket_timeout_set(p, timeout);
+ }
+
+ if (rv != APR_SUCCESS && rv != APR_EOF) {
+ apr_bucket_free(buf);
+ return rv;
+ }
+
+ if (*len > 0) {
+ apr_bucket_heap *h;
+
+ /* count for stats */
+ *data->counter += *len;
+
+ /* Change the current bucket to refer to what we read */
+ a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
+ h = a->data;
+ h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
+ *str = buf;
+ APR_BUCKET_INSERT_AFTER(a, bucket_socket_ex_create(data, a->list));
+ }
+ else {
+ apr_bucket_free(buf);
+ a = apr_bucket_immortal_make(a, "", 0);
+ *str = a->data;
+ }
+ return APR_SUCCESS;
+}
+
+static const apr_bucket_type_t bucket_type_socket_ex = {
+ "SOCKET_EX", 5, APR_BUCKET_DATA,
+ apr_bucket_destroy_noop,
+ bucket_socket_ex_read,
+ apr_bucket_setaside_notimpl,
+ apr_bucket_split_notimpl,
+ apr_bucket_copy_notimpl
+};
+
+static apr_bucket *bucket_socket_ex_make(apr_bucket *b, socket_ex_data *data)
+{
+ b->type = &bucket_type_socket_ex;
+ b->length = (apr_size_t)(-1);
+ b->start = -1;
+ b->data = data;
+ return b;
+}
+
+static apr_bucket *bucket_socket_ex_create(socket_ex_data *data,
+ apr_bucket_alloc_t *list)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ return bucket_socket_ex_make(b, data);
+}
+
+
+/*
+ * Canonicalize scgi-like URLs.
+ */
+static int scgi_canon(request_rec *r, char *url)
+{
+ char *host, sport[sizeof(":65535")];
+ const char *err, *path;
+ apr_port_t port = SCGI_DEFAULT_PORT;
+
+ if (strncasecmp(url, SCHEME "://", sizeof(SCHEME) + 2)) {
+ return DECLINED;
+ }
+ url += sizeof(SCHEME); /* Keep slashes */
+
+ err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "error parsing URL %s: %s", url, err);
+ return HTTP_BAD_REQUEST;
+ }
+
+ apr_snprintf(sport, sizeof(sport), ":%u", port);
+
+ if (ap_strchr(host, ':')) { /* if literal IPv6 address */
+ host = apr_pstrcat(r->pool, "[", host, "]", NULL);
+ }
+
+ path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
+ r->proxyreq);
+ if (!path) {
+ return HTTP_BAD_REQUEST;
+ }
+
+ r->filename = apr_pstrcat(r->pool, "proxy:" SCHEME "://", host, sport, "/",
+ path, NULL);
+ r->path_info = apr_pstrcat(r->pool, "/", path, NULL);
+ return OK;
+}
+
+
+/*
+ * Send a block of data, ensuring everything is sent
+ */
+static int sendall(proxy_conn_rec *conn, const char *buf, apr_size_t length,
+ request_rec *r)
+{
+ apr_status_t rv;
+ apr_size_t written;
+
+ while (length > 0) {
+ written = length;
+ if ((rv = apr_socket_send(conn->sock, buf, &written)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: " PROXY_FUNCTION ": sending data to "
+ "%s:%u failed", conn->hostname, conn->port);
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+
+ /* count for stats */
+ conn->worker->s->transferred += written;
+ buf += written;
+ length -= written;
+ }
+
+ return OK;
+}
+
+
+/*
+ * Send SCGI header block
+ */
+static int send_headers(request_rec *r, proxy_conn_rec *conn)
+{
+ char *buf, *cp, *bodylen;
+ const char *ns_len;
+ const apr_array_header_t *env_table;
+ const apr_table_entry_t *env;
+ apr_size_t j, len, bodylen_size;
+ apr_size_t headerlen = sizeof(CONTENT_LENGTH)
+ + sizeof(SCGI_MAGIC)
+ + sizeof(SCGI_PROTOCOL_VERSION);
+
+ ap_add_common_vars(r);
+ ap_add_cgi_vars(r);
+
+ /*
+ * The header blob basically takes the environment and concatenates
+ * keys and values using 0 bytes. There are special treatments here:
+ * - GATEWAY_INTERFACE and SCGI_MAGIC are dropped
+ * - CONTENT_LENGTH is always set and must be sent as the very first
+ * variable
+ *
+ * Additionally it's wrapped into a so-called netstring (see SCGI spec)
+ */
+ env_table = apr_table_elts(r->subprocess_env);
+ env = (apr_table_entry_t *)env_table->elts;
+ for (j=0; j<env_table->nelts; ++j) {
+ if ( (!strcmp(env[j].key, GATEWAY_INTERFACE))
+ || (!strcmp(env[j].key, CONTENT_LENGTH))
+ || (!strcmp(env[j].key, SCGI_MAGIC))) {
+ continue;
+ }
+ headerlen += strlen(env[j].key) + strlen(env[j].val) + 2;
+ }
+ bodylen = apr_psprintf(r->pool, "%" APR_OFF_T_FMT, r->remaining);
+ bodylen_size = strlen(bodylen) + 1;
+ headerlen += bodylen_size;
+
+ ns_len = apr_psprintf(r->pool, "%" APR_SIZE_T_FMT ":", headerlen);
+ len = strlen(ns_len);
+ headerlen += len + 1; /* 1 == , */
+ cp = buf = apr_palloc(r->pool, headerlen);
+ memcpy(cp, ns_len, len);
+ cp += len;
+
+ memcpy(cp, CONTENT_LENGTH, sizeof(CONTENT_LENGTH));
+ cp += sizeof(CONTENT_LENGTH);
+ memcpy(cp, bodylen, bodylen_size);
+ cp += bodylen_size;
+ memcpy(cp, SCGI_MAGIC, sizeof(SCGI_MAGIC));
+ cp += sizeof(SCGI_MAGIC);
+ memcpy(cp, SCGI_PROTOCOL_VERSION, sizeof(SCGI_PROTOCOL_VERSION));
+ cp += sizeof(SCGI_PROTOCOL_VERSION);
+
+ for (j=0; j<env_table->nelts; ++j) {
+ if ( (!strcmp(env[j].key, GATEWAY_INTERFACE))
+ || (!strcmp(env[j].key, CONTENT_LENGTH))
+ || (!strcmp(env[j].key, SCGI_MAGIC))) {
+ continue;
+ }
+ len = strlen(env[j].key) + 1;
+ memcpy(cp, env[j].key, len);
+ cp += len;
+ len = strlen(env[j].val) + 1;
+ memcpy(cp, env[j].val, len);
+ cp += len;
+ }
+ *cp++ = ',';
+
+ return sendall(conn, buf, headerlen, r);
+}
+
+
+/*
+ * Send request body (if any)
+ */
+static int send_request_body(request_rec *r, proxy_conn_rec *conn)
+{
+ if (ap_should_client_block(r)) {
+ char *buf = apr_palloc(r->pool, AP_IOBUFSIZE);
+ int status;
+ apr_size_t readlen;
+
+ readlen = ap_get_client_block(r, buf, AP_IOBUFSIZE);
+ while (readlen > 0) {
+ status = sendall(conn, buf, readlen, r);
+ if (status != OK) {
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+ readlen = ap_get_client_block(r, buf, AP_IOBUFSIZE);
+ }
+ if (readlen == -1) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "proxy: " PROXY_FUNCTION ": receiving request body "
+ "failed");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ return OK;
+}
+
+
+/*
+ * Fetch response from backend and pass back to the front
+ */
+static int pass_response(request_rec *r, proxy_conn_rec *conn)
+{
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ const char *location;
+ scgi_config *conf;
+ socket_ex_data *sock_data;
+ int status;
+
+ sock_data = apr_palloc(r->pool, sizeof(*sock_data));
+ sock_data->sock = conn->sock;
+ sock_data->counter = &conn->worker->s->read;
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ b = bucket_socket_ex_create(sock_data, r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ status = ap_scan_script_header_err_brigade(r, bb, NULL);
+ if (status != OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "proxy: " PROXY_FUNCTION ": error reading response "
+ "headers from %s:%u", conn->hostname, conn->port);
+ r->status_line = NULL;
+ apr_brigade_destroy(bb);
+ return status;
+ }
+
+ conf = ap_get_module_config(r->per_dir_config, &proxy_scgi_module);
+ if (conf->sendfile && conf->sendfile != scgi_sendfile_off) {
+ short err = 1;
+
+ location = apr_table_get(r->err_headers_out, conf->sendfile);
+ if (!location) {
+ err = 0;
+ location = apr_table_get(r->headers_out, conf->sendfile);
+ }
+ if (location) {
+ scgi_request_config *req_conf = apr_palloc(r->pool,
+ sizeof(*req_conf));
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "proxy: " PROXY_FUNCTION ": Found %s: %s - "
+ "preparing subrequest.",
+ conf->sendfile, location);
+
+ if (err) {
+ apr_table_unset(r->err_headers_out, conf->sendfile);
+ }
+ else {
+ apr_table_unset(r->headers_out, conf->sendfile);
+ }
+ req_conf->location = location;
+ req_conf->type = scgi_sendfile;
+ ap_set_module_config(r->request_config, &proxy_scgi_module,
+ req_conf);
+ apr_brigade_destroy(bb);
+ return OK;
+ }
+ }
+
+ if (conf->internal_redirect && r->status == HTTP_OK) {
+ location = apr_table_get(r->headers_out, "Location");
+ if (location && *location == '/') {
+ scgi_request_config *req_conf = apr_palloc(r->pool,
+ sizeof(*req_conf));
+ req_conf->location = location;
+ req_conf->type = scgi_internal_redirect;
+ ap_set_module_config(r->request_config, &proxy_scgi_module,
+ req_conf);
+ apr_brigade_destroy(bb);
+ return OK;
+ }
+ }
+
+ /* XXX: What could we do with that return code? */
+ (void)ap_pass_brigade(r->output_filters, bb);
+
+ return OK;
+}
+
+/*
+ * Internal redirect / subrequest handler, working on request_status hook
+ */
+static int scgi_request_status(int *status, request_rec *r)
+{
+ scgi_request_config *req_conf;
+
+ if ( (*status == OK)
+ && (req_conf = ap_get_module_config(r->request_config,
+ &proxy_scgi_module))) {
+ switch (req_conf->type) {
+ case scgi_internal_redirect:
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "proxy: " PROXY_FUNCTION ": Internal redirect to %s",
+ req_conf->location);
+
+ r->status_line = NULL;
+ if (r->method_number != M_GET) {
+ /* keep HEAD, which is passed around as M_GET, too */
+ r->method = "GET";
+ r->method_number = M_GET;
+ }
+ apr_table_unset(r->headers_in, "Content-Length");
+ ap_internal_redirect_handler(req_conf->location, r);
+ return OK;
+ /* break; */
+
+ case scgi_sendfile:
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "proxy: " PROXY_FUNCTION ": File subrequest to %s",
+ req_conf->location);
+ do {
+ request_rec *rr;
+
+ rr = ap_sub_req_lookup_file(req_conf->location, r,
+ r->output_filters);
+ if (rr->status == HTTP_OK && rr->finfo.filetype != 0) {
+ /*
+ * We don't touch Content-Length here. It might be
+ * borked (there's plenty of room for a race condition).
+ * Either the backend sets it or it's gonna be chunked.
+ */
+ ap_run_sub_req(rr);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Subrequest to file '%s' not possible. "
+ "(rr->status=%d, rr->finfo.filetype=%d)",
+ req_conf->location, rr->status,
+ rr->finfo.filetype);
+ *status = HTTP_INTERNAL_SERVER_ERROR;
+ return *status;
+ }
+ } while(0);
+
+ return OK;
+ /* break; */
+ }
+ }
+
+ return DECLINED;
+}
+
+
+/*
+ * This handles scgi:(dest) URLs
+ */
+static int scgi_handler(request_rec *r, proxy_worker *worker,
+ proxy_server_conf *conf, char *url,
+ const char *proxyname, apr_port_t proxyport)
+{
+ int status;
+ proxy_conn_rec *backend = NULL;
+ apr_pool_t *p = r->pool;
+ apr_uri_t *uri = apr_palloc(r->pool, sizeof(*uri));
+ char dummy;
+
+ if (strncasecmp(url, SCHEME "://", sizeof(SCHEME) + 2)) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "proxy: " PROXY_FUNCTION ": declining URL %s", url);
+ return DECLINED;
+ }
+ url += sizeof(SCHEME); /* keep the slashes */
+
+ /* Create space for state information */
+ status = ap_proxy_acquire_connection(PROXY_FUNCTION, &backend, worker,
+ r->server);
+ if (status != OK) {
+ goto cleanup;
+ }
+ backend->is_ssl = 0;
+
+ /* Step One: Determine Who To Connect To */
+ status = ap_proxy_determine_connection(p, r, conf, worker, backend,
+ uri, &url, proxyname, proxyport,
+ &dummy, 1);
+ if (status != OK) {
+ goto cleanup;
+ }
+
+ /* Step Two: Make the Connection */
+ if (ap_proxy_connect_backend(PROXY_FUNCTION, backend, worker, r->server)) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "proxy: " PROXY_FUNCTION ": failed to make connection "
+ "to backend: %s:%u", backend->hostname, backend->port);
+ status = HTTP_SERVICE_UNAVAILABLE;
+ goto cleanup;
+ }
+
+ /* Step Three: Process the Request */
+ if ( ((status = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR)) != OK)
+ || ((status = send_headers(r, backend)) != OK)
+ || ((status = send_request_body(r, backend)) != OK)
+ || ((status = pass_response(r, backend)) != OK)) {
+ goto cleanup;
+ }
+
+cleanup:
+ if (backend) {
+ backend->close = 1; /* always close the socket */
+ ap_proxy_release_connection(PROXY_FUNCTION, backend, r->server);
+ }
+ return status;
+}
+
+
+static void *create_scgi_config(apr_pool_t *p, char *dummy)
+{
+ scgi_config *conf=apr_palloc(p, sizeof(*conf));
+
+ conf->sendfile = NULL;
+ conf->internal_redirect = -1;
+
+ return conf;
+}
+
+
+static void *merge_scgi_config(apr_pool_t *p, void *base_, void *add_)
+{
+ scgi_config *base=base_, *add=add_, *conf=apr_palloc(p, sizeof(*conf));
+
+ conf->sendfile = add->sendfile ? add->sendfile: base->sendfile;
+ conf->internal_redirect = (add->internal_redirect != -1)
+ ? add->internal_redirect
+ : base->internal_redirect;
+ return conf;
+}
+
+
+static const char *scgi_set_send_file(cmd_parms *cmd, void *mconfig,
+ const char *arg)
+{
+ scgi_config *conf=mconfig;
+
+ if (!strcasecmp(arg, "Off")) {
+ conf->sendfile = scgi_sendfile_off;
+ }
+ else if (!strcasecmp(arg, "On")) {
+ conf->sendfile = scgi_sendfile_on;
+ }
+ else {
+ conf->sendfile = arg;
+ }
+ return NULL;
+}
+
+
+static const command_rec scgi_cmds[] =
+{
+ AP_INIT_TAKE1("ProxySCGISendfile", scgi_set_send_file, NULL,
+ RSRC_CONF|ACCESS_CONF,
+ "The name of the X-Sendfile peudo response header or "
+ "On or Off"),
+ AP_INIT_FLAG("ProxySCGIInternalRedirect", ap_set_flag_slot,
+ (void*)APR_OFFSETOF(scgi_config, internal_redirect),
+ RSRC_CONF|ACCESS_CONF,
+ "Off if internal redirect responses should not be accepted"),
+ {NULL}
+};
+
+
+static void register_hooks(apr_pool_t *p)
+{
+ proxy_hook_scheme_handler(scgi_handler, NULL, NULL, APR_HOOK_FIRST);
+ proxy_hook_canon_handler(scgi_canon, NULL, NULL, APR_HOOK_FIRST);
+ APR_OPTIONAL_HOOK(proxy, request_status, scgi_request_status, NULL, NULL,
+ APR_HOOK_MIDDLE);
+}
+
+
+module AP_MODULE_DECLARE_DATA proxy_scgi_module = {
+ STANDARD20_MODULE_STUFF,
+ create_scgi_config, /* create per-directory config structure */
+ merge_scgi_config, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ scgi_cmds, /* command table */
+ register_hooks /* register hooks */
+};
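
send_headers() above frames the CGI environment as an SCGI netstring: a decimal length, a colon, NUL-separated key/value pairs beginning with CONTENT_LENGTH and the SCGI "1" magic pair, and a terminating comma. The rough self-contained sketch below builds that framing for a hard-coded toy environment so the wire format is easy to see; it is not the module's code:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Toy environment: NUL-separated key/value pairs, CONTENT_LENGTH first,
     * followed by the SCGI "1" pair, as the protocol requires. */
    static const char payload[] =
        "CONTENT_LENGTH\0" "0\0"
        "SCGI\0" "1\0"
        "REQUEST_METHOD\0" "GET\0"
        "REQUEST_URI\0" "/demo\0";
    size_t payload_len = sizeof(payload) - 1;  /* ignore the literal's own terminator */
    char netstring[256];
    size_t i, n;

    /* Netstring framing: "<length>:<payload>," */
    n = (size_t)snprintf(netstring, sizeof(netstring), "%zu:", payload_len);
    memcpy(netstring + n, payload, payload_len);
    netstring[n + payload_len] = ',';
    n += payload_len + 1;

    /* Print with NUL bytes shown as \0 so the framing is visible. */
    for (i = 0; i < n; i++) {
        if (netstring[i] == '\0') {
            fputs("\\0", stdout);
        }
        else {
            fputc(netstring[i], stdout);
        }
    }
    fputc('\n', stdout);
    return 0;
}
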
diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c
index 17a9c86d..259254a9 100644
--- a/modules/ssl/ssl_engine_init.c
+++ b/modules/ssl/ssl_engine_init.c
@@ -1213,7 +1213,7 @@ STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s,
/*
* Cleanup
*/
- sk_X509_NAME_set_cmp_func(ca_list, NULL);
+ (void) sk_X509_NAME_set_cmp_func(ca_list, NULL);
return ca_list;
}
diff --git a/modules/ssl/ssl_engine_pphrase.c b/modules/ssl/ssl_engine_pphrase.c
index 76763e31..dc49eff6 100644
--- a/modules/ssl/ssl_engine_pphrase.c
+++ b/modules/ssl/ssl_engine_pphrase.c
@@ -188,7 +188,8 @@ void ssl_pphrase_Handle(server_rec *s, apr_pool_t *p)
if (sc->server->pks->cert_files[0] == NULL) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, pServ,
"Server should be SSL-aware but has no certificate "
- "configured [Hint: SSLCertificateFile]");
+ "configured [Hint: SSLCertificateFile] (%s:%d)",
+ pServ->defn_name, pServ->defn_line_number);
ssl_die();
}
algoCert = SSL_ALGO_UNKNOWN;
diff --git a/modules/ssl/ssl_engine_vars.c b/modules/ssl/ssl_engine_vars.c
index 7833f14a..ab99af9e 100644
--- a/modules/ssl/ssl_engine_vars.c
+++ b/modules/ssl/ssl_engine_vars.c
@@ -418,10 +418,8 @@ static const struct {
{ "G", NID_givenName },
{ "S", NID_surname },
{ "D", NID_description },
-#ifdef NID_x500UniqueIdentifier /* new name as of Openssl 0.9.7 */
- { "UID", NID_x500UniqueIdentifier },
-#else /* old name, OpenSSL < 0.9.7 */
- { "UID", NID_uniqueIdentifier },
+#ifdef NID_userId
+ { "UID", NID_userId },
#endif
{ "Email", NID_pkcs9_emailAddress },
{ NULL, 0 }
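
The ssl_engine_vars.c hunk maps the "UID" component of the SSL_*_DN variables to OpenSSL's NID_userId instead of the old x500UniqueIdentifier/uniqueIdentifier pair. To confirm what that NID resolves to in a given OpenSSL build, a quick check such as the following can be used (assuming NID_userId is defined in your headers; this is not part of the patch):

#include <stdio.h>
#include <openssl/objects.h>

int main(void)
{
    /* Prints the short and long names registered for NID_userId,
     * typically "UID" and "userId". */
    printf("%s / %s\n", OBJ_nid2sn(NID_userId), OBJ_nid2ln(NID_userId));
    return 0;
}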