Diffstat (limited to 'modules')
-rw-r--r--  modules/NWGNUmakefile | 6
-rw-r--r--  modules/aaa/mod_access_compat.c | 10
-rw-r--r--  modules/aaa/mod_auth_basic.c | 155
-rw-r--r--  modules/aaa/mod_auth_digest.c | 68
-rw-r--r--  modules/aaa/mod_auth_form.c | 1
-rw-r--r--  modules/aaa/mod_authn_file.c | 16
-rw-r--r--  modules/aaa/mod_authnz_ldap.c | 42
-rw-r--r--  modules/aaa/mod_authz_groupfile.c | 18
-rw-r--r--  modules/aaa/mod_authz_host.c | 6
-rw-r--r--  modules/arch/netware/mod_netware.c | 2
-rw-r--r--  modules/arch/win32/mod_isapi.h | 2
-rw-r--r--  modules/cache/NWGNUcach_socache | 262
-rw-r--r--  modules/cache/NWGNUmakefile | 1
-rw-r--r--  modules/cache/cache_common.h | 1
-rw-r--r--  modules/cache/cache_socache_common.h | 57
-rw-r--r--  modules/cache/cache_storage.c | 394
-rw-r--r--  modules/cache/cache_storage.h | 23
-rw-r--r--  modules/cache/cache_util.c | 365
-rw-r--r--  modules/cache/cache_util.h | 31
-rw-r--r--  modules/cache/config.m4 | 3
-rw-r--r--  modules/cache/mod_cache.c | 393
-rw-r--r--  modules/cache/mod_cache_disk.c | 170
-rw-r--r--  modules/cache/mod_cache_socache.c | 1501
-rw-r--r--  modules/cache/mod_cache_socache.dsp | 115
-rw-r--r--  modules/cache/mod_socache_memcache.c | 8
-rw-r--r--  modules/cluster/mod_heartmonitor.c | 4
-rw-r--r--  modules/core/NWGNUmakefile | 257
-rw-r--r--  modules/core/config.m4 | 2
-rw-r--r--  modules/core/mod_macro.c | 953
-rw-r--r--  modules/core/mod_macro.dsp | 111
-rwxr-xr-x  modules/core/test/Makefile | 69
-rw-r--r--  modules/core/test/conf/inc63_1.conf | 5
-rw-r--r--  modules/core/test/conf/inc63_2.conf | 3
-rwxr-xr-x  modules/core/test/conf/test01.conf | 3
-rwxr-xr-x  modules/core/test/conf/test02.conf | 3
-rwxr-xr-x  modules/core/test/conf/test03.conf | 5
-rwxr-xr-x  modules/core/test/conf/test04.conf | 5
-rwxr-xr-x  modules/core/test/conf/test05.conf | 5
-rwxr-xr-x  modules/core/test/conf/test06.conf | 6
-rwxr-xr-x  modules/core/test/conf/test07.conf | 3
-rwxr-xr-x  modules/core/test/conf/test08.conf | 3
-rwxr-xr-x  modules/core/test/conf/test09.conf | 6
-rwxr-xr-x  modules/core/test/conf/test10.conf | 10
-rwxr-xr-x  modules/core/test/conf/test11.conf | 15
-rwxr-xr-x  modules/core/test/conf/test12.conf | 12
-rwxr-xr-x  modules/core/test/conf/test13.conf | 18
-rwxr-xr-x  modules/core/test/conf/test14.conf | 23
-rwxr-xr-x  modules/core/test/conf/test15.conf | 9
-rwxr-xr-x  modules/core/test/conf/test16.conf | 11
-rwxr-xr-x  modules/core/test/conf/test17.conf | 10
-rwxr-xr-x  modules/core/test/conf/test18.conf | 10
-rwxr-xr-x  modules/core/test/conf/test19.conf | 26
-rwxr-xr-x  modules/core/test/conf/test20.conf | 11
-rwxr-xr-x  modules/core/test/conf/test21.conf | 11
-rwxr-xr-x  modules/core/test/conf/test22.conf | 11
-rwxr-xr-x  modules/core/test/conf/test23.conf | 15
-rwxr-xr-x  modules/core/test/conf/test24.conf | 23
-rwxr-xr-x  modules/core/test/conf/test25.conf | 27
-rwxr-xr-x  modules/core/test/conf/test26.conf | 19
-rwxr-xr-x  modules/core/test/conf/test27.conf | 22
-rwxr-xr-x  modules/core/test/conf/test28.conf | 13
-rwxr-xr-x  modules/core/test/conf/test29.conf | 10
-rwxr-xr-x  modules/core/test/conf/test30.conf | 12
-rwxr-xr-x  modules/core/test/conf/test31.conf | 16
-rwxr-xr-x  modules/core/test/conf/test32.conf | 7
-rwxr-xr-x  modules/core/test/conf/test33.conf | 3
-rwxr-xr-x  modules/core/test/conf/test34.conf | 14
-rwxr-xr-x  modules/core/test/conf/test35.conf | 10
-rwxr-xr-x  modules/core/test/conf/test36.conf | 12
-rwxr-xr-x  modules/core/test/conf/test37.conf | 7
-rwxr-xr-x  modules/core/test/conf/test38.conf | 10
-rwxr-xr-x  modules/core/test/conf/test39.conf | 23
-rwxr-xr-x  modules/core/test/conf/test40.conf | 33
-rwxr-xr-x  modules/core/test/conf/test41.conf | 20
-rwxr-xr-x  modules/core/test/conf/test42.conf | 13
-rwxr-xr-x  modules/core/test/conf/test43.conf | 29
-rwxr-xr-x  modules/core/test/conf/test44.conf | 19
-rwxr-xr-x  modules/core/test/conf/test45.conf | 7
-rwxr-xr-x  modules/core/test/conf/test46.conf | 11
-rwxr-xr-x  modules/core/test/conf/test47.conf | 15
-rwxr-xr-x  modules/core/test/conf/test48.conf | 23
-rw-r--r--  modules/core/test/conf/test49.conf | 2
-rw-r--r--  modules/core/test/conf/test50.conf | 5
-rw-r--r--  modules/core/test/conf/test51.conf | 9
-rw-r--r--  modules/core/test/conf/test52.conf | 8
-rwxr-xr-x  modules/core/test/conf/test53.conf | 2
-rw-r--r--  modules/core/test/conf/test54.conf | 6
-rw-r--r--  modules/core/test/conf/test55.conf | 11
-rw-r--r--  modules/core/test/conf/test56.conf | 18
-rw-r--r--  modules/core/test/conf/test57.conf | 4
-rw-r--r--  modules/core/test/conf/test58.conf | 4
-rw-r--r--  modules/core/test/conf/test59.conf | 4
-rw-r--r--  modules/core/test/conf/test60.conf | 17
-rw-r--r--  modules/core/test/conf/test61.conf | 18
-rw-r--r--  modules/core/test/conf/test62.conf | 25
-rw-r--r--  modules/core/test/conf/test63.conf | 9
-rw-r--r--  modules/core/test/conf/test64.conf | 5
-rw-r--r--  modules/core/test/conf/test65.conf | 11
-rw-r--r--  modules/core/test/conf/test66.conf | 7
-rw-r--r--  modules/core/test/conf/test67.conf | 1
-rw-r--r--  modules/core/test/conf/test68.conf | 5
-rw-r--r--  modules/core/test/conf/test69.conf | 14
-rw-r--r--  modules/core/test/ref/test01.out | 3
-rw-r--r--  modules/core/test/ref/test02.out | 3
-rw-r--r--  modules/core/test/ref/test03.out | 3
-rw-r--r--  modules/core/test/ref/test04.out | 3
-rw-r--r--  modules/core/test/ref/test05.out | 3
-rw-r--r--  modules/core/test/ref/test06.out | 3
-rw-r--r--  modules/core/test/ref/test07.out | 3
-rw-r--r--  modules/core/test/ref/test08.out | 3
-rw-r--r--  modules/core/test/ref/test09.out | 3
-rw-r--r--  modules/core/test/ref/test10.out | 3
-rw-r--r--  modules/core/test/ref/test11.out | 6
-rw-r--r--  modules/core/test/ref/test12.out | 7
-rw-r--r--  modules/core/test/ref/test13.out | 8
-rw-r--r--  modules/core/test/ref/test14.out | 14
-rw-r--r--  modules/core/test/ref/test15.out | 6
-rw-r--r--  modules/core/test/ref/test16.out | 5
-rw-r--r--  modules/core/test/ref/test17.out | 7
-rw-r--r--  modules/core/test/ref/test18.out | 7
-rw-r--r--  modules/core/test/ref/test19.out | 9
-rw-r--r--  modules/core/test/ref/test20.out | 4
-rw-r--r--  modules/core/test/ref/test21.out | 5
-rw-r--r--  modules/core/test/ref/test22.out | 6
-rw-r--r--  modules/core/test/ref/test23.out | 7
-rw-r--r--  modules/core/test/ref/test24.out | 8
-rw-r--r--  modules/core/test/ref/test25.out | 9
-rw-r--r--  modules/core/test/ref/test26.out | 11
-rw-r--r--  modules/core/test/ref/test27.out | 8
-rw-r--r--  modules/core/test/ref/test28.out | 6
-rw-r--r--  modules/core/test/ref/test29.out | 4
-rw-r--r--  modules/core/test/ref/test30.out | 7
-rw-r--r--  modules/core/test/ref/test31.out | 23
-rw-r--r--  modules/core/test/ref/test32.out | 3
-rw-r--r--  modules/core/test/ref/test33.out | 3
-rw-r--r--  modules/core/test/ref/test34.out | 13
-rw-r--r--  modules/core/test/ref/test35.out | 13
-rw-r--r--  modules/core/test/ref/test36.out | 20
-rw-r--r--  modules/core/test/ref/test37.out | 3
-rw-r--r--  modules/core/test/ref/test38.out | 6
-rw-r--r--  modules/core/test/ref/test39.out | 7
-rw-r--r--  modules/core/test/ref/test40.out | 18
-rw-r--r--  modules/core/test/ref/test41.out | 9
-rw-r--r--  modules/core/test/ref/test42.out | 15
-rw-r--r--  modules/core/test/ref/test43.out | 8
-rw-r--r--  modules/core/test/ref/test44.out | 5
-rw-r--r--  modules/core/test/ref/test45.out | 19
-rw-r--r--  modules/core/test/ref/test46.out | 9
-rw-r--r--  modules/core/test/ref/test47.out | 8
-rw-r--r--  modules/core/test/ref/test48.out | 20
-rw-r--r--  modules/core/test/ref/test49.out | 3
-rw-r--r--  modules/core/test/ref/test50.out | 3
-rw-r--r--  modules/core/test/ref/test51.out | 3
-rw-r--r--  modules/core/test/ref/test52.out | 6
-rw-r--r--  modules/core/test/ref/test53.out | 3
-rw-r--r--  modules/core/test/ref/test54.out | 6
-rw-r--r--  modules/core/test/ref/test55.out | 8
-rw-r--r--  modules/core/test/ref/test56.out | 12
-rw-r--r--  modules/core/test/ref/test57.out | 3
-rw-r--r--  modules/core/test/ref/test58.out | 3
-rw-r--r--  modules/core/test/ref/test59.out | 3
-rw-r--r--  modules/core/test/ref/test60.out | 15
-rw-r--r--  modules/core/test/ref/test61.out | 9
-rw-r--r--  modules/core/test/ref/test62.out | 15
-rw-r--r--  modules/core/test/ref/test63.out | 10
-rw-r--r--  modules/core/test/ref/test64.out | 7
-rw-r--r--  modules/core/test/ref/test65.out | 7
-rw-r--r--  modules/core/test/ref/test66.out | 7
-rw-r--r--  modules/core/test/ref/test67.out | 5
-rw-r--r--  modules/core/test/ref/test68.out | 6
-rw-r--r--  modules/core/test/ref/test69.out | 10
-rw-r--r--  modules/dav/fs/dbm.c | 14
-rw-r--r--  modules/dav/main/mod_dav.c | 68
-rw-r--r--  modules/dav/main/mod_dav.h | 15
-rw-r--r--  modules/dav/main/props.c | 8
-rw-r--r--  modules/dav/main/util.c | 36
-rw-r--r--  modules/filters/mod_charset_lite.c | 16
-rw-r--r--  modules/filters/mod_deflate.c | 45
-rw-r--r--  modules/filters/mod_ext_filter.c | 5
-rw-r--r--  modules/filters/mod_include.c | 5
-rw-r--r--  modules/filters/mod_proxy_html.c | 8
-rw-r--r--  modules/filters/mod_ratelimit.c | 22
-rw-r--r--  modules/filters/regexp.h | 3
-rw-r--r--  modules/filters/sed0.c | 2
-rw-r--r--  modules/generators/mod_autoindex.c | 8
-rw-r--r--  modules/generators/mod_cgi.c | 5
-rw-r--r--  modules/generators/mod_status.c | 47
-rw-r--r--  modules/http/byterange_filter.c | 17
-rw-r--r--  modules/http/http_filters.c | 2
-rw-r--r--  modules/http/http_protocol.c | 326
-rw-r--r--  modules/loggers/mod_log_config.c | 4
-rw-r--r--  modules/loggers/mod_log_forensic.c | 2
-rw-r--r--  modules/lua/NWGNUmakefile | 11
-rw-r--r--  modules/lua/README | 10
-rw-r--r--  modules/lua/config.m4 | 2
-rw-r--r--  modules/lua/lua_apr.c | 35
-rw-r--r--  modules/lua/lua_apr.h | 17
-rw-r--r--  modules/lua/lua_config.c | 4
-rw-r--r--  modules/lua/lua_config.h | 4
-rw-r--r--  modules/lua/lua_dbd.c | 7
-rw-r--r--  modules/lua/lua_dbd.h | 2
-rw-r--r--  modules/lua/lua_passwd.c | 178
-rw-r--r--  modules/lua/lua_passwd.h | 91
-rw-r--r--  modules/lua/lua_request.c | 1533
-rw-r--r--  modules/lua/lua_request.h | 20
-rw-r--r--  modules/lua/lua_vmprep.c | 198
-rw-r--r--  modules/lua/lua_vmprep.h | 53
-rw-r--r--  modules/lua/mod_lua.c | 660
-rw-r--r--  modules/lua/mod_lua.dsp | 8
-rw-r--r--  modules/lua/mod_lua.h | 19
-rw-r--r--  modules/mappers/mod_imagemap.c | 2
-rw-r--r--  modules/mappers/mod_negotiation.c | 8
-rw-r--r--  modules/mappers/mod_rewrite.c | 2
-rw-r--r--  modules/metadata/mod_cern_meta.c | 2
-rw-r--r--  modules/metadata/mod_headers.c | 2
-rw-r--r--  modules/metadata/mod_remoteip.c | 13
-rw-r--r--  modules/metadata/mod_setenvif.c | 2
-rw-r--r--  modules/proxy/NWGNUmakefile | 1
-rw-r--r--  modules/proxy/NWGNUproxy | 4
-rw-r--r--  modules/proxy/NWGNUproxywstunnel | 250
-rw-r--r--  modules/proxy/ajp_utils.c | 2
-rw-r--r--  modules/proxy/balancers/mod_lbmethod_heartbeat.c | 4
-rw-r--r--  modules/proxy/config.m4 | 3
-rw-r--r--  modules/proxy/mod_proxy.c | 110
-rw-r--r--  modules/proxy/mod_proxy.h | 57
-rw-r--r--  modules/proxy/mod_proxy_balancer.c | 20
-rw-r--r--  modules/proxy/mod_proxy_http.c | 364
-rw-r--r--  modules/proxy/mod_proxy_wstunnel.c | 399
-rw-r--r--  modules/proxy/mod_proxy_wstunnel.dsp | 123
-rw-r--r--  modules/proxy/proxy_util.c | 372
-rw-r--r--  modules/session/NWGNUmakefile | 4
-rw-r--r--  modules/session/mod_session.c | 3
-rw-r--r--  modules/session/mod_session_cookie.c | 1
-rw-r--r--  modules/session/mod_session_crypto.c | 5
-rw-r--r--  modules/session/mod_session_dbd.c | 82
-rw-r--r--  modules/slotmem/mod_slotmem_shm.c | 4
-rw-r--r--  modules/ssl/mod_ssl.c | 7
-rw-r--r--  modules/ssl/ssl_engine_config.c | 11
-rw-r--r--  modules/ssl/ssl_engine_init.c | 81
-rw-r--r--  modules/ssl/ssl_engine_io.c | 43
-rw-r--r--  modules/ssl/ssl_private.h | 2
-rw-r--r--  modules/ssl/ssl_util_ocsp.c | 8
-rw-r--r--  modules/ssl/ssl_util_ssl.c | 93
-rw-r--r--  modules/ssl/ssl_util_ssl.h | 1
-rw-r--r--  modules/ssl/ssl_util_stapling.c | 2
245 files changed, 10666 insertions, 1319 deletions
diff --git a/modules/NWGNUmakefile b/modules/NWGNUmakefile
index 0ec061ef..6e8113d4 100644
--- a/modules/NWGNUmakefile
+++ b/modules/NWGNUmakefile
@@ -13,12 +13,13 @@ include $(AP_WORK)/build/NWGNUenvironment.inc
ifeq "$(wildcard $(APRUTIL)/include/apr_ldap.h)" "$(APRUTIL)/include/apr_ldap.h"
WITH_LDAP = $(shell $(AWK) '/^\#define APR_HAS_LDAP /{print $$3}' $(APRUTIL)/include/apr_ldap.h)
else
-ifeq "$(MAKECMDGOALS)" "clean"
WITH_LDAP = 1
-else
+ifneq "$(MAKECMDGOALS)" "clean"
+ifneq "$(findstring clobber_,$(MAKECMDGOALS))" "clobber_"
WITH_LDAP = 0
endif
endif
+endif
# If USE_STDSOCKETS is defined we allways build mod_ssl
ifdef USE_STDSOCKETS
@@ -33,6 +34,7 @@ SUBDIRS = \
aaa \
cache \
cluster \
+ core \
dav/main \
dav/fs \
dav/lock \
diff --git a/modules/aaa/mod_access_compat.c b/modules/aaa/mod_access_compat.c
index 8d7afca1..46d8da0e 100644
--- a/modules/aaa/mod_access_compat.c
+++ b/modules/aaa/mod_access_compat.c
@@ -147,7 +147,6 @@ static const char *allow_cmd(cmd_parms *cmd, void *dv, const char *from,
allowdeny *a;
char *where = apr_pstrdup(cmd->pool, where_c);
char *s;
- char msgbuf[120];
apr_status_t rv;
if (strcasecmp(from, "from"))
@@ -178,17 +177,14 @@ static const char *allow_cmd(cmd_parms *cmd, void *dv, const char *from,
return "An IP address was expected";
}
else if (rv != APR_SUCCESS) {
- apr_strerror(rv, msgbuf, sizeof msgbuf);
- return apr_pstrdup(cmd->pool, msgbuf);
+ return apr_psprintf(cmd->pool, "%pm", &rv);
}
a->type = T_IP;
}
else if (!APR_STATUS_IS_EINVAL(rv = apr_ipsubnet_create(&a->x.ip, where,
NULL, cmd->pool))) {
- if (rv != APR_SUCCESS) {
- apr_strerror(rv, msgbuf, sizeof msgbuf);
- return apr_pstrdup(cmd->pool, msgbuf);
- }
+ if (rv != APR_SUCCESS)
+ return apr_psprintf(cmd->pool, "%pm", &rv);
a->type = T_IP;
}
else { /* no slash, didn't look like an IP address => must be a host */
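
[Context for the error-reporting change above: the %pm conversion is APR's apr_psprintf()/apr_vformatter() extension that, given a pointer to an apr_status_t, expands to the corresponding error string, which is what lets the patch drop the fixed-size apr_strerror() buffer. A minimal sketch of the idiom, with illustrative names that are not part of the patch:]

    #include "apr_pools.h"
    #include "apr_strings.h"

    /* Sketch only: format an APR status code as readable text via the %pm
     * conversion instead of a caller-managed apr_strerror() buffer.
     * Note that %pm takes a pointer to the apr_status_t value.
     */
    static const char *describe_status(apr_pool_t *pool, apr_status_t rv)
    {
        if (rv != APR_SUCCESS) {
            return apr_psprintf(pool, "operation failed: %pm", &rv);
        }
        return "success";
    }
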
diff --git a/modules/aaa/mod_auth_basic.c b/modules/aaa/mod_auth_basic.c
index cadeb5bd..8c1367b3 100644
--- a/modules/aaa/mod_auth_basic.c
+++ b/modules/aaa/mod_auth_basic.c
@@ -15,7 +15,6 @@
*/
#include "apr_strings.h"
-#include "apr_md5.h" /* for apr_password_validate */
#include "apr_lib.h" /* for apr_isspace */
#include "apr_base64.h" /* for apr_base64_decode et al */
#define APR_WANT_STRFUNC /* for strcasecmp */
@@ -29,26 +28,53 @@
#include "http_protocol.h"
#include "http_request.h"
#include "ap_provider.h"
+#include "ap_expr.h"
#include "mod_auth.h"
typedef struct {
authn_provider_list *providers;
- char *dir;
+ char *dir; /* unused variable */
int authoritative;
+ ap_expr_info_t *fakeuser;
+ ap_expr_info_t *fakepass;
+ int fake_set:1;
+ int authoritative_set:1;
} auth_basic_config_rec;
static void *create_auth_basic_dir_config(apr_pool_t *p, char *d)
{
auth_basic_config_rec *conf = apr_pcalloc(p, sizeof(*conf));
- conf->dir = d;
/* Any failures are fatal. */
conf->authoritative = 1;
return conf;
}
+static void *merge_auth_basic_dir_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ auth_basic_config_rec *newconf = apr_pcalloc(p, sizeof(*newconf));
+ auth_basic_config_rec *base = basev;
+ auth_basic_config_rec *overrides = overridesv;
+
+ newconf->authoritative =
+ overrides->authoritative_set ? overrides->authoritative :
+ base->authoritative;
+ newconf->authoritative_set = overrides->authoritative_set
+ || base->authoritative_set;
+
+ newconf->fakeuser =
+ overrides->fake_set ? overrides->fakeuser : base->fakeuser;
+ newconf->fakepass =
+ overrides->fake_set ? overrides->fakepass : base->fakepass;
+ newconf->fake_set = overrides->fake_set || base->fake_set;
+
+ newconf->providers = overrides->providers ? overrides->providers : base->providers;
+
+ return newconf;
+}
+
static const char *add_authn_provider(cmd_parms *cmd, void *config,
const char *arg)
{
@@ -94,15 +120,72 @@ static const char *add_authn_provider(cmd_parms *cmd, void *config,
return NULL;
}
+static const char *set_authoritative(cmd_parms * cmd, void *config, int flag)
+{
+ auth_basic_config_rec *conf = (auth_basic_config_rec *) config;
+
+ conf->authoritative = flag;
+ conf->authoritative_set = 1;
+
+ return NULL;
+}
+
+static const char *add_basic_fake(cmd_parms * cmd, void *config,
+ const char *user, const char *pass)
+{
+ auth_basic_config_rec *conf = (auth_basic_config_rec *) config;
+ const char *err;
+
+ if (!strcasecmp(user, "off")) {
+
+ conf->fakeuser = NULL;
+ conf->fakepass = NULL;
+ conf->fake_set = 1;
+
+ }
+ else {
+
+ /* if password is unspecified, set it to the fixed string "password" to
+ * be compatible with the behaviour of mod_ssl.
+ */
+ if (!pass) {
+ pass = "password";
+ }
+
+ conf->fakeuser =
+ ap_expr_parse_cmd(cmd, user, AP_EXPR_FLAG_STRING_RESULT,
+ &err, NULL);
+ if (err) {
+ return apr_psprintf(cmd->pool,
+ "Could not parse fake username expression '%s': %s", user,
+ err);
+ }
+ conf->fakepass =
+ ap_expr_parse_cmd(cmd, pass, AP_EXPR_FLAG_STRING_RESULT,
+ &err, NULL);
+ if (err) {
+ return apr_psprintf(cmd->pool,
+ "Could not parse fake password expression '%s': %s", user,
+ err);
+ }
+ conf->fake_set = 1;
+
+ }
+
+ return NULL;
+}
+
static const command_rec auth_basic_cmds[] =
{
AP_INIT_ITERATE("AuthBasicProvider", add_authn_provider, NULL, OR_AUTHCFG,
"specify the auth providers for a directory or location"),
- AP_INIT_FLAG("AuthBasicAuthoritative", ap_set_flag_slot,
- (void *)APR_OFFSETOF(auth_basic_config_rec, authoritative),
- OR_AUTHCFG,
+ AP_INIT_FLAG("AuthBasicAuthoritative", set_authoritative, NULL, OR_AUTHCFG,
"Set to 'Off' to allow access control to be passed along to "
"lower modules if the UserID is not known to this module"),
+ AP_INIT_TAKE12("AuthBasicFake", add_basic_fake, NULL, OR_AUTHCFG,
+ "Fake basic authentication using the given expressions for "
+ "username and password, 'off' to disable. Password defaults "
+ "to 'password' if missing."),
{NULL}
};
@@ -295,10 +378,68 @@ static int authenticate_basic_user(request_rec *r)
return OK;
}
+/* If requested, create a fake basic authentication header for the benefit
+ * of a proxy or application running behind this server.
+ */
+static int authenticate_basic_fake(request_rec *r)
+{
+ const char *auth_line, *user, *pass, *err;
+ auth_basic_config_rec *conf = ap_get_module_config(r->per_dir_config,
+ &auth_basic_module);
+
+ if (!conf->fakeuser) {
+ return DECLINED;
+ }
+
+ user = ap_expr_str_exec(r, conf->fakeuser, &err);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02455)
+ "AuthBasicFake: could not evaluate user expression for URI '%s': %s", r->uri, err);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if (!user || !*user) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02458)
+ "AuthBasicFake: empty username expression for URI '%s', ignoring", r->uri);
+
+ apr_table_unset(r->headers_in, "Authorization");
+
+ return DECLINED;
+ }
+
+ pass = ap_expr_str_exec(r, conf->fakepass, &err);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02456)
+ "AuthBasicFake: could not evaluate password expression for URI '%s': %s", r->uri, err);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if (!pass || !*pass) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02459)
+ "AuthBasicFake: empty password expression for URI '%s', ignoring", r->uri);
+
+ apr_table_unset(r->headers_in, "Authorization");
+
+ return DECLINED;
+ }
+
+ auth_line = apr_pstrcat(r->pool, "Basic ",
+ ap_pbase64encode(r->pool,
+ apr_pstrcat(r->pool, user,
+ ":", pass, NULL)),
+ NULL);
+ apr_table_setn(r->headers_in, "Authorization", auth_line);
+
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02457)
+ "AuthBasicFake: \"Authorization: %s\"",
+ auth_line);
+
+ return OK;
+}
+
static void register_hooks(apr_pool_t *p)
{
ap_hook_check_authn(authenticate_basic_user, NULL, NULL, APR_HOOK_MIDDLE,
AP_AUTH_INTERNAL_PER_CONF);
+ ap_hook_fixups(authenticate_basic_fake, NULL, NULL, APR_HOOK_LAST);
ap_hook_note_auth_failure(hook_note_basic_auth_failure, NULL, NULL,
APR_HOOK_MIDDLE);
}
@@ -307,7 +448,7 @@ AP_DECLARE_MODULE(auth_basic) =
{
STANDARD20_MODULE_STUFF,
create_auth_basic_dir_config, /* dir config creater */
- NULL, /* dir merger --- default is to override */
+ merge_auth_basic_dir_config, /* dir merger --- default is to override */
NULL, /* server config */
NULL, /* merge server config */
auth_basic_cmds, /* command apr_table_t */
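
[Context for the AuthBasicFake change above: the directive stores its username and password arguments as ap_expr expressions, parsed once at configuration time with ap_expr_parse_cmd() and evaluated per request with ap_expr_str_exec(), which is what allows request-dependent values to be interpolated into the fabricated Authorization header. A condensed sketch of that parse-then-evaluate pattern, using hypothetical function names outside mod_auth_basic:]

    #include "httpd.h"
    #include "http_config.h"
    #include "http_log.h"
    #include "apr_strings.h"
    #include "ap_expr.h"

    /* Sketch only: parse a string-valued expression at configuration time... */
    static const char *demo_parse(cmd_parms *cmd, const char *arg,
                                  ap_expr_info_t **expr)
    {
        const char *err = NULL;
        *expr = ap_expr_parse_cmd(cmd, arg, AP_EXPR_FLAG_STRING_RESULT,
                                  &err, NULL);
        if (err) {
            return apr_psprintf(cmd->pool, "Could not parse '%s': %s",
                                arg, err);
        }
        return NULL;
    }

    /* ...then evaluate it against each incoming request. */
    static const char *demo_eval(request_rec *r, ap_expr_info_t *expr)
    {
        const char *err = NULL;
        const char *val = ap_expr_str_exec(r, expr, &err);
        return err ? NULL : val;
    }
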
diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c
index 65183160..987e5b5b 100644
--- a/modules/aaa/mod_auth_digest.c
+++ b/modules/aaa/mod_auth_digest.c
@@ -91,7 +91,7 @@ typedef struct digest_config_struct {
const char *dir_name;
authn_provider_list *providers;
const char *realm;
- char **qop_list;
+ apr_array_header_t *qop_list;
apr_sha1_ctx_t nonce_ctx;
apr_time_t nonce_lifetime;
const char *nonce_format;
@@ -223,6 +223,8 @@ static apr_status_t cleanup_tables(void *not_used)
opaque_lock = NULL;
}
+ client_list = NULL;
+
return APR_SUCCESS;
}
@@ -240,10 +242,8 @@ static apr_status_t initialize_secret(server_rec *s)
#endif
if (status != APR_SUCCESS) {
- char buf[120];
ap_log_error(APLOG_MARK, APLOG_CRIT, status, s, APLOGNO(01758)
- "error generating secret: %s",
- apr_strerror(status, buf, sizeof(buf)));
+ "error generating secret");
return status;
}
@@ -451,8 +451,7 @@ static void *create_digest_dir_config(apr_pool_t *p, char *dir)
conf = (digest_config_rec *) apr_pcalloc(p, sizeof(digest_config_rec));
if (conf) {
- conf->qop_list = apr_palloc(p, sizeof(char*));
- conf->qop_list[0] = NULL;
+ conf->qop_list = apr_array_make(p, 2, sizeof(char *));
conf->nonce_lifetime = DFLT_NONCE_LIFE;
conf->dir_name = apr_pstrdup(p, dir);
conf->algorithm = DFLT_ALGORITHM;
@@ -532,15 +531,10 @@ static const char *add_authn_provider(cmd_parms *cmd, void *config,
static const char *set_qop(cmd_parms *cmd, void *config, const char *op)
{
digest_config_rec *conf = (digest_config_rec *) config;
- char **tmp;
- int cnt;
if (!strcasecmp(op, "none")) {
- if (conf->qop_list[0] == NULL) {
- conf->qop_list = apr_palloc(cmd->pool, 2 * sizeof(char*));
- conf->qop_list[1] = NULL;
- }
- conf->qop_list[0] = "none";
+ apr_array_clear(conf->qop_list);
+ *(const char **)apr_array_push(conf->qop_list) = "none";
return NULL;
}
@@ -551,14 +545,7 @@ static const char *set_qop(cmd_parms *cmd, void *config, const char *op)
return apr_pstrcat(cmd->pool, "Unrecognized qop: ", op, NULL);
}
- for (cnt = 0; conf->qop_list[cnt] != NULL; cnt++)
- ;
-
- tmp = apr_palloc(cmd->pool, (cnt + 2) * sizeof(char*));
- memcpy(tmp, conf->qop_list, cnt*sizeof(char*));
- tmp[cnt] = apr_pstrdup(cmd->pool, op);
- tmp[cnt+1] = NULL;
- conf->qop_list = tmp;
+ *(const char **)apr_array_push(conf->qop_list) = op;
return NULL;
}
@@ -1056,10 +1043,8 @@ static void gen_nonce_hash(char *hash, const char *timestr, const char *opaque,
const server_rec *server,
const digest_config_rec *conf)
{
- const char *hex = "0123456789abcdef";
unsigned char sha1[APR_SHA1_DIGESTSIZE];
apr_sha1_ctx_t ctx;
- int idx;
memcpy(&ctx, &conf->nonce_ctx, sizeof(ctx));
/*
@@ -1075,12 +1060,7 @@ static void gen_nonce_hash(char *hash, const char *timestr, const char *opaque,
}
apr_sha1_final(sha1, &ctx);
- for (idx=0; idx<APR_SHA1_DIGESTSIZE; idx++) {
- *hash++ = hex[sha1[idx] >> 4];
- *hash++ = hex[sha1[idx] & 0xF];
- }
-
- *hash++ = '\0';
+ ap_bin2hex(sha1, APR_SHA1_DIGESTSIZE, hash);
}
@@ -1251,19 +1231,17 @@ static void note_digest_auth_failure(request_rec *r,
const char *qop, *opaque, *opaque_param, *domain, *nonce;
/* Setup qop */
- if (conf->qop_list[0] == NULL) {
+ if (apr_is_empty_array(conf->qop_list)) {
qop = ", qop=\"auth\"";
}
- else if (!strcasecmp(conf->qop_list[0], "none")) {
+ else if (!strcasecmp(*(const char **)(conf->qop_list->elts), "none")) {
qop = "";
}
else {
- int cnt;
- qop = apr_pstrcat(r->pool, ", qop=\"", conf->qop_list[0], NULL);
- for (cnt = 1; conf->qop_list[cnt] != NULL; cnt++) {
- qop = apr_pstrcat(r->pool, qop, ",", conf->qop_list[cnt], NULL);
- }
- qop = apr_pstrcat(r->pool, qop, "\"", NULL);
+ qop = apr_pstrcat(r->pool, ", qop=\"",
+ apr_array_pstrcat(r->pool, conf->qop_list, ','),
+ "\"",
+ NULL);
}
/* Setup opaque */
@@ -1464,9 +1442,8 @@ static int check_nc(const request_rec *r, const digest_header_rec *resp,
return OK;
}
- if ((conf->qop_list != NULL)
- &&(conf->qop_list[0] != NULL)
- &&!strcasecmp(conf->qop_list[0], "none")) {
+ if (!apr_is_empty_array(conf->qop_list) &&
+ !strcasecmp(*(const char **)(conf->qop_list->elts), "none")) {
/* qop is none, client must not send a nonce count */
if (snc != NULL) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01772)
@@ -1893,15 +1870,17 @@ static int authenticate_digest_user(request_rec *r)
else {
const char *exp_digest;
int match = 0, idx;
- for (idx = 0; conf->qop_list[idx] != NULL; idx++) {
- if (!strcasecmp(conf->qop_list[idx], resp->message_qop)) {
+ const char **tmp = (const char **)(conf->qop_list->elts);
+ for (idx = 0; idx < conf->qop_list->nelts; idx++) {
+ if (!strcasecmp(*tmp, resp->message_qop)) {
match = 1;
break;
}
+ ++tmp;
}
if (!match
- && !(conf->qop_list[0] == NULL
+ && !(apr_is_empty_array(conf->qop_list)
&& !strcasecmp(resp->message_qop, "auth"))) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01793)
"invalid qop `%s' received: %s",
@@ -1983,7 +1962,8 @@ static int add_auth_info(request_rec *r)
/* do rfc-2069 digest
*/
- if (conf->qop_list[0] && !strcasecmp(conf->qop_list[0], "none")
+ if (!apr_is_empty_array(conf->qop_list) &&
+ !strcasecmp(*(const char **)(conf->qop_list->elts), "none")
&& resp->message_qop == NULL) {
/* use only RFC-2069 format */
ai = nextnonce;
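
[Context for the qop_list changes above: the hand-maintained, NULL-terminated char ** is replaced by an apr_array_header_t, removing the manual counting and reallocation. The core APR array idiom (create, push, test for emptiness, join with a separator) looks roughly like the sketch below; the function and element names are illustrative only:]

    #include "apr_pools.h"
    #include "apr_strings.h"
    #include "apr_tables.h"

    /* Sketch only: build a dynamic list of strings and join it, the way the
     * qop_list handling now does with apr_array_pstrcat().
     */
    static char *join_tokens(apr_pool_t *p)
    {
        apr_array_header_t *list = apr_array_make(p, 2, sizeof(char *));

        *(const char **)apr_array_push(list) = "auth";
        *(const char **)apr_array_push(list) = "auth-int";

        if (apr_is_empty_array(list)) {
            /* replaces the old "first element is NULL" test */
            return apr_pstrdup(p, "");
        }

        /* join all elements with a single separator character */
        return apr_array_pstrcat(p, list, ',');
    }
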
diff --git a/modules/aaa/mod_auth_form.c b/modules/aaa/mod_auth_form.c
index 28045b5d..7bba517b 100644
--- a/modules/aaa/mod_auth_form.c
+++ b/modules/aaa/mod_auth_form.c
@@ -15,7 +15,6 @@
*/
#include "apr_strings.h"
-#include "apr_md5.h" /* for apr_password_validate */
#include "apr_lib.h" /* for apr_isspace */
#include "apr_base64.h" /* for apr_base64_decode et al */
#define APR_WANT_STRFUNC /* for strcasecmp */
diff --git a/modules/aaa/mod_authn_file.c b/modules/aaa/mod_authn_file.c
index a54a423b..9909e443 100644
--- a/modules/aaa/mod_authn_file.c
+++ b/modules/aaa/mod_authn_file.c
@@ -45,21 +45,11 @@ static void *create_authn_file_dir_config(apr_pool_t *p, char *d)
return conf;
}
-static const char *set_authn_file_slot(cmd_parms *cmd, void *offset,
- const char *f, const char *t)
-{
- if (t && strcmp(t, "standard")) {
- return apr_pstrcat(cmd->pool, "Invalid auth file type: ", t, NULL);
- }
-
- return ap_set_file_slot(cmd, offset, f);
-}
-
static const command_rec authn_file_cmds[] =
{
- AP_INIT_TAKE12("AuthUserFile", set_authn_file_slot,
- (void *)APR_OFFSETOF(authn_file_config_rec, pwfile),
- OR_AUTHCFG, "text file containing user IDs and passwords"),
+ AP_INIT_TAKE1("AuthUserFile", ap_set_file_slot,
+ (void *)APR_OFFSETOF(authn_file_config_rec, pwfile),
+ OR_AUTHCFG, "text file containing user IDs and passwords"),
{NULL}
};
diff --git a/modules/aaa/mod_authnz_ldap.c b/modules/aaa/mod_authnz_ldap.c
index d55b57f5..2c25dbc7 100644
--- a/modules/aaa/mod_authnz_ldap.c
+++ b/modules/aaa/mod_authnz_ldap.c
@@ -1443,7 +1443,7 @@ static const char *mod_auth_ldap_set_deref(cmd_parms *cmd, void *config, const c
sec->deref = always;
}
else {
- return "Unrecognized value for AuthLDAPAliasDereference directive";
+ return "Unrecognized value for AuthLDAPDereferenceAliases directive";
}
return NULL;
}
@@ -1531,6 +1531,43 @@ static const char *set_bind_pattern(cmd_parms *cmd, void *_cfg, const char *exp,
return NULL;
}
+static const char *set_bind_password(cmd_parms *cmd, void *_cfg, const char *arg)
+{
+ authn_ldap_config_t *sec = _cfg;
+ int arglen = strlen(arg);
+ char **argv;
+ char *result;
+
+ if ((arglen > 5) && strncmp(arg, "exec:", 5) == 0) {
+ if (apr_tokenize_to_argv(arg+5, &argv, cmd->temp_pool) != APR_SUCCESS) {
+ return apr_pstrcat(cmd->pool,
+ "Unable to parse exec arguments from ",
+ arg+5, NULL);
+ }
+ argv[0] = ap_server_root_relative(cmd->temp_pool, argv[0]);
+
+ if (!argv[0]) {
+ return apr_pstrcat(cmd->pool,
+ "Invalid AuthLDAPBindPassword exec location:",
+ arg+5, NULL);
+ }
+ result = ap_get_exec_line(cmd->pool,
+ (const char*)argv[0], (const char * const *)argv);
+
+ if(!result) {
+ return apr_pstrcat(cmd->pool,
+ "Unable to get bind password from exec of ",
+ arg+5, NULL);
+ }
+ sec->bindpw = result;
+ }
+ else {
+ sec->bindpw = (char *)arg;
+ }
+
+ return NULL;
+}
+
static const command_rec authnz_ldap_cmds[] =
{
AP_INIT_TAKE12("AuthLDAPURL", mod_auth_ldap_parse_url, NULL, OR_AUTHCFG,
@@ -1561,8 +1598,7 @@ static const command_rec authnz_ldap_cmds[] =
(void *)APR_OFFSETOF(authn_ldap_config_t, binddn), OR_AUTHCFG,
"DN to use to bind to LDAP server. If not provided, will do an anonymous bind."),
- AP_INIT_TAKE1("AuthLDAPBindPassword", ap_set_string_slot,
- (void *)APR_OFFSETOF(authn_ldap_config_t, bindpw), OR_AUTHCFG,
+ AP_INIT_TAKE1("AuthLDAPBindPassword", set_bind_password, NULL, OR_AUTHCFG,
"Password to use to bind to LDAP server. If not provided, will do an anonymous bind."),
AP_INIT_FLAG("AuthLDAPBindAuthoritative", ap_set_flag_slot,
diff --git a/modules/aaa/mod_authz_groupfile.c b/modules/aaa/mod_authz_groupfile.c
index 15bb60ff..934a7d2f 100644
--- a/modules/aaa/mod_authz_groupfile.c
+++ b/modules/aaa/mod_authz_groupfile.c
@@ -70,22 +70,12 @@ static void *create_authz_groupfile_dir_config(apr_pool_t *p, char *d)
return conf;
}
-static const char *set_authz_groupfile_slot(cmd_parms *cmd, void *offset, const char *f,
- const char *t)
-{
- if (t && strcmp(t, "standard")) {
- return apr_pstrcat(cmd->pool, "Invalid auth file type: ", t, NULL);
- }
-
- return ap_set_file_slot(cmd, offset, f);
-}
-
static const command_rec authz_groupfile_cmds[] =
{
- AP_INIT_TAKE12("AuthGroupFile", set_authz_groupfile_slot,
- (void *)APR_OFFSETOF(authz_groupfile_config_rec, groupfile),
- OR_AUTHCFG,
- "text file containing group names and member user IDs"),
+ AP_INIT_TAKE1("AuthGroupFile", ap_set_file_slot,
+ (void *)APR_OFFSETOF(authz_groupfile_config_rec, groupfile),
+ OR_AUTHCFG,
+ "text file containing group names and member user IDs"),
{NULL}
};
diff --git a/modules/aaa/mod_authz_host.c b/modules/aaa/mod_authz_host.c
index 90876761..f4d5c41c 100644
--- a/modules/aaa/mod_authz_host.c
+++ b/modules/aaa/mod_authz_host.c
@@ -131,10 +131,8 @@ static const char *ip_parse_config(cmd_parms *cmd,
return apr_psprintf(p, "ip address '%s' appears to be invalid", w);
}
else if (rv != APR_SUCCESS) {
- char msgbuf[120];
- apr_strerror(rv, msgbuf, sizeof msgbuf);
- return apr_psprintf(p, "ip address '%s' appears to be invalid: %s",
- w, msgbuf);
+ return apr_psprintf(p, "ip address '%s' appears to be invalid: %pm",
+ w, &rv);
}
if (parsed_subnets)
diff --git a/modules/arch/netware/mod_netware.c b/modules/arch/netware/mod_netware.c
index 1c5950da..b34a5222 100644
--- a/modules/arch/netware/mod_netware.c
+++ b/modules/arch/netware/mod_netware.c
@@ -128,7 +128,7 @@ static apr_status_t ap_cgi_build_command(const char **cmd, const char ***argv,
for (ptr = cmd_only; *ptr && (*ptr != ' '); ptr++);
*ptr = '\0';
- /* Figure out what the extension is so that we can matche it. */
+ /* Figure out what the extension is so that we can match it. */
ext = strrchr(apr_filepath_name_get(cmd_only), '.');
/* If there isn't an extension then give it an empty string */
diff --git a/modules/arch/win32/mod_isapi.h b/modules/arch/win32/mod_isapi.h
index 9f8bc93e..88f50177 100644
--- a/modules/arch/win32/mod_isapi.h
+++ b/modules/arch/win32/mod_isapi.h
@@ -245,7 +245,7 @@ typedef apr_uint32_t (APR_THREAD_FUNC
#define HSE_TERM_MUST_UNLOAD 1
#define HSE_TERM_ADVISORY_UNLOAD 2
-/* The shutdown entry point óptionally exported by an ISAPI handler, passed
+/* The shutdown entry point optionally exported by an ISAPI handler, passed
* HSE_TERM_MUST_UNLOAD or HSE_TERM_ADVISORY_UNLOAD. The module may return
* if passed HSE_TERM_ADVISORY_UNLOAD, and the module will remain loaded.
* If the module returns 1 to HSE_TERM_ADVISORY_UNLOAD it is immediately
diff --git a/modules/cache/NWGNUcach_socache b/modules/cache/NWGNUcach_socache
new file mode 100644
index 00000000..f7ed0e43
--- /dev/null
+++ b/modules/cache/NWGNUcach_socache
@@ -0,0 +1,262 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macro's are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(SERVER)/mpm/netware \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = cach_socache
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Cache Socache Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = cach_socache
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_cache_socache.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Apache2 \
+ Libc \
+ mod_cach \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put it's path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file put it's path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ @mod_cache.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ cache_socache_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependancies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/cache/NWGNUmakefile b/modules/cache/NWGNUmakefile
index bc0c58fe..e544df62 100644
--- a/modules/cache/NWGNUmakefile
+++ b/modules/cache/NWGNUmakefile
@@ -154,6 +154,7 @@ XDCDATA =
TARGET_nlm = \
$(OBJDIR)/mod_cach.nlm \
$(OBJDIR)/cach_dsk.nlm \
+ $(OBJDIR)/cach_socache.nlm \
$(OBJDIR)/socachdbm.nlm \
$(OBJDIR)/socachmem.nlm \
$(OBJDIR)/socachshmcb.nlm \
diff --git a/modules/cache/cache_common.h b/modules/cache/cache_common.h
index cedce076..9d56d28b 100644
--- a/modules/cache/cache_common.h
+++ b/modules/cache/cache_common.h
@@ -45,6 +45,7 @@ typedef struct cache_control {
unsigned int must_revalidate:1;
unsigned int proxy_revalidate:1;
unsigned int s_maxage:1;
+ unsigned int invalidated:1; /* has this entity been invalidated? */
apr_int64_t max_age_value; /* if positive, then set */
apr_int64_t max_stale_value; /* if positive, then set */
apr_int64_t min_fresh_value; /* if positive, then set */
diff --git a/modules/cache/cache_socache_common.h b/modules/cache/cache_socache_common.h
new file mode 100644
index 00000000..3ee3d0da
--- /dev/null
+++ b/modules/cache/cache_socache_common.h
@@ -0,0 +1,57 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file cache_socache_common.h
+ * @brief Common Shared Object Cache vars/structs
+ *
+ * @defgroup Cache_cache Cache Functions
+ * @ingroup MOD_SOCACHE_CACHE
+ * @{
+ */
+
+#ifndef CACHE_SOCACHE_COMMON_H
+#define CACHE_SOCACHE_COMMON_H
+
+#include "apr_time.h"
+
+#include "cache_common.h"
+
+#define CACHE_SOCACHE_VARY_FORMAT_VERSION 1
+#define CACHE_SOCACHE_DISK_FORMAT_VERSION 2
+
+typedef struct {
+ /* Indicates the format of the header struct stored on-disk. */
+ apr_uint32_t format;
+ /* The HTTP status code returned for this response. */
+ int status;
+ /* The size of the entity name that follows. */
+ apr_size_t name_len;
+ /* The number of times we've cached this entity. */
+ apr_size_t entity_version;
+ /* Miscellaneous time values. */
+ apr_time_t date;
+ apr_time_t expire;
+ apr_time_t request_time;
+ apr_time_t response_time;
+ /* Does this cached request have a body? */
+ unsigned int header_only:1;
+ /* The parsed cache control header */
+ cache_control_t control;
+} cache_socache_info_t;
+
+#endif /* CACHE_SOCACHE_COMMON_H */
+/** @} */
diff --git a/modules/cache/cache_storage.c b/modules/cache/cache_storage.c
index 9021ec1d..af60a39b 100644
--- a/modules/cache/cache_storage.c
+++ b/modules/cache/cache_storage.c
@@ -113,26 +113,69 @@ int cache_create_entity(cache_request_rec *cache, request_rec *r,
return DECLINED;
}
-static int set_cookie_doo_doo(void *v, const char *key, const char *val)
+static int filter_header_do(void *v, const char *key, const char *val)
+{
+ if ((*key == 'W' || *key == 'w') && !strcasecmp(key, "Warning")
+ && *val == '1') {
+ /* any stored Warning headers with warn-code 1xx (see section
+ * 14.46) MUST be deleted from the cache entry and the forwarded
+ * response.
+ */
+ }
+ else {
+ apr_table_addn(v, key, val);
+ }
+ return 1;
+}
+static int remove_header_do(void *v, const char *key, const char *val)
+{
+ if ((*key == 'W' || *key == 'w') && !strcasecmp(key, "Warning")) {
+ /* any stored Warning headers with warn-code 2xx MUST be retained
+ * in the cache entry and the forwarded response.
+ */
+ }
+ else {
+ apr_table_unset(v, key);
+ }
+ return 1;
+}
+static int add_header_do(void *v, const char *key, const char *val)
{
apr_table_addn(v, key, val);
return 1;
}
/**
- * Take headers from the cache, and overlap them over the existing response
- * headers.
+ * Take two sets of headers, sandwich them together, and apply the result to
+ * r->headers_out.
+ *
+ * To complicate this, a header may be duplicated in either table. Should a
+ * header exist in the top table, all matching headers will be removed from
+ * the bottom table before the headers are combined. The Warning headers are
+ * handled specially. Warnings are added rather than being replaced, while
+ * in the case of revalidation 1xx Warnings are stripped.
+ *
+ * The Content-Type and Last-Modified headers are then re-parsed and inserted
+ * into the request.
*/
-void cache_accept_headers(cache_handle_t *h, request_rec *r,
- int preserve_orig)
+void cache_accept_headers(cache_handle_t *h, request_rec *r, apr_table_t *top,
+ apr_table_t *bottom, int revalidation)
{
- apr_table_t *cookie_table, *hdr_copy;
const char *v;
- v = apr_table_get(h->resp_hdrs, "Content-Type");
+ if (revalidation) {
+ r->headers_out = apr_table_make(r->pool, 10);
+ apr_table_do(filter_header_do, r->headers_out, bottom, NULL);
+ }
+ else if (r->headers_out != bottom) {
+ r->headers_out = apr_table_copy(r->pool, bottom);
+ }
+ apr_table_do(remove_header_do, r->headers_out, top, NULL);
+ apr_table_do(add_header_do, r->headers_out, top, NULL);
+
+ v = apr_table_get(r->headers_out, "Content-Type");
if (v) {
ap_set_content_type(r, v);
- apr_table_unset(h->resp_hdrs, "Content-Type");
/*
* Also unset possible Content-Type headers in r->headers_out and
* r->err_headers_out as they may be different to what we have received
@@ -149,39 +192,12 @@ void cache_accept_headers(cache_handle_t *h, request_rec *r,
/* If the cache gave us a Last-Modified header, we can't just
* pass it on blindly because of restrictions on future values.
*/
- v = apr_table_get(h->resp_hdrs, "Last-Modified");
+ v = apr_table_get(r->headers_out, "Last-Modified");
if (v) {
ap_update_mtime(r, apr_date_parse_http(v));
ap_set_last_modified(r);
- apr_table_unset(h->resp_hdrs, "Last-Modified");
}
- /* The HTTP specification says that it is legal to merge duplicate
- * headers into one. Some browsers that support Cookies don't like
- * merged headers and prefer that each Set-Cookie header is sent
- * separately. Lets humour those browsers by not merging.
- * Oh what a pain it is.
- */
- cookie_table = apr_table_make(r->pool, 2);
- apr_table_do(set_cookie_doo_doo, cookie_table, r->err_headers_out,
- "Set-Cookie", NULL);
- apr_table_do(set_cookie_doo_doo, cookie_table, h->resp_hdrs,
- "Set-Cookie", NULL);
- apr_table_unset(r->err_headers_out, "Set-Cookie");
- apr_table_unset(h->resp_hdrs, "Set-Cookie");
-
- if (preserve_orig) {
- hdr_copy = apr_table_copy(r->pool, h->resp_hdrs);
- apr_table_overlap(hdr_copy, r->headers_out, APR_OVERLAP_TABLES_SET);
- r->headers_out = hdr_copy;
- }
- else {
- apr_table_overlap(r->headers_out, h->resp_hdrs, APR_OVERLAP_TABLES_SET);
- }
- if (!apr_is_empty_table(cookie_table)) {
- r->err_headers_out = apr_table_overlay(r->pool, r->err_headers_out,
- cookie_table);
- }
}
/*
@@ -209,6 +225,13 @@ int cache_select(cache_request_rec *cache, request_rec *r)
return DECLINED;
}
+ /* if no-cache, we can't serve from the cache, but we may store to the
+ * cache.
+ */
+ if (!ap_cache_check_no_cache(cache, r)) {
+ return DECLINED;
+ }
+
if (!cache->key) {
rv = cache_generate_key(r, r->pool, &cache->key);
if (rv != APR_SUCCESS) {
@@ -216,10 +239,6 @@ int cache_select(cache_request_rec *cache, request_rec *r)
}
}
- if (!ap_cache_check_allowed(cache, r)) {
- return DECLINED;
- }
-
/* go through the cache types till we get a match */
h = apr_palloc(r->pool, sizeof(cache_handle_t));
@@ -229,7 +248,8 @@ int cache_select(cache_request_rec *cache, request_rec *r)
switch ((rv = list->provider->open_entity(h, r, cache->key))) {
case OK: {
char *vary = NULL;
- int fresh, mismatch = 0;
+ int mismatch = 0;
+ char *last = NULL;
if (list->provider->recall_headers(h, r) != APR_SUCCESS) {
/* try again with next cache type */
@@ -255,25 +275,19 @@ int cache_select(cache_request_rec *cache, request_rec *r)
*
* RFC2616 13.6 and 14.44 describe the Vary mechanism.
*/
- vary = apr_pstrdup(r->pool, apr_table_get(h->resp_hdrs, "Vary"));
- while (vary && *vary) {
- char *name = vary;
+ vary = cache_strqtok(
+ apr_pstrdup(r->pool,
+ cache_table_getm(r->pool, h->resp_hdrs, "Vary")),
+ CACHE_SEPARATOR, &last);
+ while (vary) {
const char *h1, *h2;
- /* isolate header name */
- while (*vary && !apr_isspace(*vary) && (*vary != ','))
- ++vary;
- while (*vary && (apr_isspace(*vary) || (*vary == ','))) {
- *vary = '\0';
- ++vary;
- }
-
/*
* is this header in the request and the header in the cached
* request identical? If not, we give up and do a straight get
*/
- h1 = apr_table_get(r->headers_in, name);
- h2 = apr_table_get(h->req_hdrs, name);
+ h1 = cache_table_getm(r->pool, r->headers_in, vary);
+ h2 = cache_table_getm(r->pool, h->req_hdrs, vary);
if (h1 == h2) {
/* both headers NULL, so a match - do nothing */
}
@@ -283,9 +297,11 @@ int cache_select(cache_request_rec *cache, request_rec *r)
else {
/* headers do not match, so Vary failed */
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r, APLOGNO(00694) "cache_select_url(): Vary header mismatch.");
+ r, APLOGNO(00694) "cache_select(): Vary header mismatch.");
mismatch = 1;
+ break;
}
+ vary = cache_strqtok(NULL, CACHE_SEPARATOR, &last);
}
/* no vary match, try next provider */
@@ -298,9 +314,27 @@ int cache_select(cache_request_rec *cache, request_rec *r)
cache->provider = list->provider;
cache->provider_name = list->provider_name;
+ /*
+ * RFC2616 13.3.4 Rules for When to Use Entity Tags and Last-Modified
+ * Dates: An HTTP/1.1 caching proxy, upon receiving a conditional request
+ * that includes both a Last-Modified date and one or more entity tags as
+ * cache validators, MUST NOT return a locally cached response to the
+ * client unless that cached response is consistent with all of the
+ * conditional header fields in the request.
+ */
+ if (ap_condition_if_match(r, h->resp_hdrs) == AP_CONDITION_NOMATCH
+ || ap_condition_if_unmodified_since(r, h->resp_hdrs)
+ == AP_CONDITION_NOMATCH
+ || ap_condition_if_none_match(r, h->resp_hdrs)
+ == AP_CONDITION_NOMATCH
+ || ap_condition_if_modified_since(r, h->resp_hdrs)
+ == AP_CONDITION_NOMATCH
+ || ap_condition_if_range(r, h->resp_hdrs) == AP_CONDITION_NOMATCH) {
+ mismatch = 1;
+ }
+
/* Is our cached response fresh enough? */
- fresh = cache_check_freshness(h, cache, r);
- if (!fresh) {
+ if (mismatch || !cache_check_freshness(h, cache, r)) {
const char *etag, *lastmod;
/* Cache-Control: only-if-cached and revalidation required, try
@@ -317,42 +351,45 @@ int cache_select(cache_request_rec *cache, request_rec *r)
r->headers_in);
cache->stale_handle = h;
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00695)
- "Cached response for %s isn't fresh. Adding/replacing "
- "conditional request headers.", r->uri);
+ /* if no existing conditionals, use conditionals of our own */
+ if (!mismatch) {
- /* We can only revalidate with our own conditionals: remove the
- * conditions from the original request.
- */
- apr_table_unset(r->headers_in, "If-Match");
- apr_table_unset(r->headers_in, "If-Modified-Since");
- apr_table_unset(r->headers_in, "If-None-Match");
- apr_table_unset(r->headers_in, "If-Range");
- apr_table_unset(r->headers_in, "If-Unmodified-Since");
-
- etag = apr_table_get(h->resp_hdrs, "ETag");
- lastmod = apr_table_get(h->resp_hdrs, "Last-Modified");
-
- if (etag || lastmod) {
- /* If we have a cached etag and/or Last-Modified add in
- * our own conditionals.
- */
+ ap_log_rerror(
+ APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00695) "Cached response for %s isn't fresh. Adding "
+ "conditional request headers.", r->uri);
- if (etag) {
- apr_table_set(r->headers_in, "If-None-Match", etag);
- }
+ /* Remove existing conditionals that might conflict with ours */
+ apr_table_unset(r->headers_in, "If-Match");
+ apr_table_unset(r->headers_in, "If-Modified-Since");
+ apr_table_unset(r->headers_in, "If-None-Match");
+ apr_table_unset(r->headers_in, "If-Range");
+ apr_table_unset(r->headers_in, "If-Unmodified-Since");
- if (lastmod) {
- apr_table_set(r->headers_in, "If-Modified-Since",
- lastmod);
- }
+ etag = apr_table_get(h->resp_hdrs, "ETag");
+ lastmod = apr_table_get(h->resp_hdrs, "Last-Modified");
- /*
- * Do not do Range requests with our own conditionals: If
- * we get 304 the Range does not matter and otherwise the
- * entity changed and we want to have the complete entity
- */
- apr_table_unset(r->headers_in, "Range");
+ if (etag || lastmod) {
+ /* If we have a cached etag and/or Last-Modified add in
+ * our own conditionals.
+ */
+
+ if (etag) {
+ apr_table_set(r->headers_in, "If-None-Match", etag);
+ }
+
+ if (lastmod) {
+ apr_table_set(r->headers_in, "If-Modified-Since",
+ lastmod);
+ }
+
+ /*
+ * Do not do Range requests with our own conditionals: If
+ * we get 304 the Range does not matter and otherwise the
+ * entity changed and we want to have the complete entity
+ */
+ apr_table_unset(r->headers_in, "Range");
+
+ }
}
@@ -361,7 +398,7 @@ int cache_select(cache_request_rec *cache, request_rec *r)
}
/* Okay, this response looks okay. Merge in our stuff and go. */
- cache_accept_headers(h, r, 0);
+ cache_accept_headers(h, r, h->resp_hdrs, r->headers_out, 0);
cache->handle = h;
return OK;
@@ -389,14 +426,15 @@ int cache_select(cache_request_rec *cache, request_rec *r)
return DECLINED;
}
-apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
- const char **key)
+static apr_status_t cache_canonicalise_key(request_rec *r, apr_pool_t* p,
+ const char *uri, apr_uri_t *parsed_uri, const char **key)
{
cache_server_conf *conf;
char *port_str, *hn, *lcs;
const char *hostname, *scheme;
int i;
- char *path, *querystring;
+ const char *path;
+ char *querystring;
if (*key) {
/*
@@ -410,7 +448,7 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
* option below.
*/
conf = (cache_server_conf *) ap_get_module_config(r->server->module_config,
- &cache_module);
+ &cache_module);
/*
* Use the canonical name to improve cache hit rate, but only if this is
@@ -436,15 +474,15 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
}
else {
/* Use _default_ as the hostname if none present, as in mod_vhost */
- hostname = ap_get_server_name(r);
+ hostname = ap_get_server_name(r);
if (!hostname) {
hostname = "_default_";
}
}
}
- else if(r->parsed_uri.hostname) {
+ else if (parsed_uri->hostname) {
/* Copy the parsed uri hostname */
- hn = apr_pstrdup(p, r->parsed_uri.hostname);
+ hn = apr_pstrdup(p, parsed_uri->hostname);
ap_str_tolower(hn);
/* const work-around */
hostname = hn;
@@ -463,9 +501,9 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
* "no proxy request" and "reverse proxy request" are handled in the same
* manner (see above why this is needed).
*/
- if (r->proxyreq && r->parsed_uri.scheme) {
+ if (r->proxyreq && parsed_uri->scheme) {
/* Copy the scheme and lower-case it */
- lcs = apr_pstrdup(p, r->parsed_uri.scheme);
+ lcs = apr_pstrdup(p, parsed_uri->scheme);
ap_str_tolower(lcs);
/* const work-around */
scheme = lcs;
@@ -488,11 +526,11 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
* server.
*/
if (r->proxyreq && (r->proxyreq != PROXYREQ_REVERSE)) {
- if (r->parsed_uri.port_str) {
- port_str = apr_pcalloc(p, strlen(r->parsed_uri.port_str) + 2);
+ if (parsed_uri->port_str) {
+ port_str = apr_pcalloc(p, strlen(parsed_uri->port_str) + 2);
port_str[0] = ':';
- for (i = 0; r->parsed_uri.port_str[i]; i++) {
- port_str[i + 1] = apr_tolower(r->parsed_uri.port_str[i]);
+ for (i = 0; parsed_uri->port_str[i]; i++) {
+ port_str[i + 1] = apr_tolower(parsed_uri->port_str[i]);
}
}
else if (apr_uri_port_of_scheme(scheme)) {
@@ -524,26 +562,26 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
* Check if we need to ignore session identifiers in the URL and do so
* if needed.
*/
- path = r->uri;
- querystring = r->parsed_uri.query;
+ path = uri;
+ querystring = parsed_uri->query;
if (conf->ignore_session_id->nelts) {
int i;
char **identifier;
- identifier = (char **)conf->ignore_session_id->elts;
+ identifier = (char **) conf->ignore_session_id->elts;
for (i = 0; i < conf->ignore_session_id->nelts; i++, identifier++) {
int len;
- char *param;
+ const char *param;
len = strlen(*identifier);
/*
* Check that we have a parameter separator in the last segment
* of the path and that the parameter matches our identifier
*/
- if ((param = strrchr(path, ';'))
- && !strncmp(param + 1, *identifier, len)
- && (*(param + len + 1) == '=')
- && !strchr(param + len + 2, '/')) {
+ if ((param = ap_strrchr_c(path, ';'))
+ && !strncmp(param + 1, *identifier, len)
+ && (*(param + len + 1) == '=')
+ && !ap_strchr_c(param + len + 2, '/')) {
path = apr_pstrndup(p, path, param - path);
continue;
}
@@ -556,7 +594,7 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
* querystring and followed by a '='
*/
if (!strncmp(querystring, *identifier, len)
- && (*(querystring + len) == '=')) {
+ && (*(querystring + len) == '=')) {
param = querystring;
}
else {
@@ -574,18 +612,19 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
}
}
if (param) {
- char *amp;
+ const char *amp;
if (querystring != param) {
querystring = apr_pstrndup(p, querystring,
- param - querystring);
+ param - querystring);
}
else {
querystring = "";
}
- if ((amp = strchr(param + len + 1, '&'))) {
- querystring = apr_pstrcat(p, querystring, amp + 1, NULL);
+ if ((amp = ap_strchr_c(param + len + 1, '&'))) {
+ querystring = apr_pstrcat(p, querystring, amp + 1,
+ NULL);
}
else {
/*
@@ -605,12 +644,12 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
/* Key format is a URI, optionally without the query-string */
if (conf->ignorequerystring) {
- *key = apr_pstrcat(p, scheme, "://", hostname, port_str,
- path, "?", NULL);
+ *key = apr_pstrcat(p, scheme, "://", hostname, port_str, path, "?",
+ NULL);
}
else {
- *key = apr_pstrcat(p, scheme, "://", hostname, port_str,
- path, "?", querystring, NULL);
+ *key = apr_pstrcat(p, scheme, "://", hostname, port_str, path, "?",
+ querystring, NULL);
}
/*
@@ -621,9 +660,118 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
* resource in the cache under a key where it is never found by the quick
* handler during following requests.
*/
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00698)
- "cache: Key for entity %s?%s is %s", r->uri,
- r->parsed_uri.query, *key);
+ ap_log_rerror(
+ APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00698) "cache: Key for entity %s?%s is %s", uri, parsed_uri->query, *key);
return APR_SUCCESS;
}
+
+apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
+ const char **key)
+{
+ return cache_canonicalise_key(r, p, r->uri, &r->parsed_uri, key);
+}
+
+/*
+ * Invalidate a specific URL entity in all caches
+ *
+ * All cached entities for this URL are removed, usually in
+ * response to a POST/PUT or DELETE.
+ *
+ * This function returns OK if at least one entity was found and
+ * removed, and DECLINED if no cached entities were removed.
+ */
+int cache_invalidate(cache_request_rec *cache, request_rec *r)
+{
+ cache_provider_list *list;
+ apr_status_t rv, status = DECLINED;
+ cache_handle_t *h;
+ apr_uri_t location_uri;
+ apr_uri_t content_location_uri;
+
+ const char *location, *location_key = NULL;
+ const char *content_location, *content_location_key = NULL;
+
+ if (!cache) {
+ /* This should never happen */
+ ap_log_rerror(
+ APLOG_MARK, APLOG_ERR, APR_EGENERAL, r, APLOGNO(00697) "cache: No cache request information available for key"
+ " generation");
+ return DECLINED;
+ }
+
+ if (!cache->key) {
+ rv = cache_generate_key(r, r->pool, &cache->key);
+ if (rv != APR_SUCCESS) {
+ return DECLINED;
+ }
+ }
+
+ location = apr_table_get(r->headers_out, "Location");
+ if (location) {
+ if (APR_SUCCESS != apr_uri_parse(r->pool, location, &location_uri)
+ || APR_SUCCESS
+ != cache_canonicalise_key(r, r->pool, location,
+ &location_uri, &location_key)
+ || strcmp(r->parsed_uri.hostname, location_uri.hostname)) {
+ location_key = NULL;
+ }
+ }
+
+ content_location = apr_table_get(r->headers_out, "Content-Location");
+ if (content_location) {
+ if (APR_SUCCESS
+ != apr_uri_parse(r->pool, content_location,
+ &content_location_uri)
+ || APR_SUCCESS
+ != cache_canonicalise_key(r, r->pool, content_location,
+ &content_location_uri, &content_location_key)
+ || strcmp(r->parsed_uri.hostname,
+ content_location_uri.hostname)) {
+ content_location_key = NULL;
+ }
+ }
+
+ /* go through the cache types */
+ h = apr_palloc(r->pool, sizeof(cache_handle_t));
+
+ list = cache->providers;
+
+ while (list) {
+
+ /* invalidate the request uri */
+ rv = list->provider->open_entity(h, r, cache->key);
+ if (OK == rv) {
+ rv = list->provider->invalidate_entity(h, r);
+ status = OK;
+ }
+ ap_log_rerror(
+ APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(02468) "cache: Attempted to invalidate cached entity with key: %s", cache->key);
+
+ /* invalidate the Location */
+ if (location_key) {
+ rv = list->provider->open_entity(h, r, location_key);
+ if (OK == rv) {
+ rv = list->provider->invalidate_entity(h, r);
+ status = OK;
+ }
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(02469)
+                    "cache: Attempted to invalidate cached entity with key: %s",
+                    location_key);
+ }
+
+ /* invalidate the Content-Location */
+ if (content_location_key) {
+ rv = list->provider->open_entity(h, r, content_location_key);
+ if (OK == rv) {
+ rv = list->provider->invalidate_entity(h, r);
+ status = OK;
+ }
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(02470)
+                    "cache: Attempted to invalidate cached entity with key: %s",
+                    content_location_key);
+ }
+
+ list = list->next;
+ }
+
+ return status;
+}
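
A hedged usage sketch for the function above: a caller that only wants to invalidate after a successful unsafe request could gate on the response status and check the return value (the status test here is illustrative; the CACHE_INVALIDATE filter added later uses its own check):

/* Sketch: invalidate cached entities once an unsafe request succeeded. */
if (ap_is_HTTP_SUCCESS(r->status)) {
    if (cache_invalidate(cache, r) == OK) {
        /* at least one entity (request URI, Location or Content-Location)
         * was invalidated across the configured providers */
    }
    else {
        /* DECLINED: nothing was cached under any of the candidate keys */
    }
}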
diff --git a/modules/cache/cache_storage.h b/modules/cache/cache_storage.h
index 2b67970e..83f2946f 100644
--- a/modules/cache/cache_storage.h
+++ b/modules/cache/cache_storage.h
@@ -40,6 +40,20 @@ int cache_remove_url(cache_request_rec *cache, request_rec *r);
int cache_create_entity(cache_request_rec *cache, request_rec *r,
apr_off_t size, apr_bucket_brigade *in);
int cache_select(cache_request_rec *cache, request_rec *r);
+
+/**
+ * invalidate a specific URL entity in all caches
+ *
+ * All cached entities for this URL are removed, usually in
+ * response to a POST, PUT or DELETE.
+ *
+ * This function returns OK if at least one entity was found and
+ * removed, and DECLINED if no cached entities were removed.
+ * @param cache cache_request_rec
+ * @param r request_rec
+ */
+int cache_invalidate(cache_request_rec *cache, request_rec *r);
+
apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
const char **key);
@@ -47,11 +61,12 @@ apr_status_t cache_generate_key_default(request_rec *r, apr_pool_t* p,
* Merge in cached headers into the response
* @param h cache_handle_t
* @param r request_rec
- * @param preserve_orig If 1, the values in r->headers_out are preserved.
- * Otherwise, they are overwritten by the cached value.
+ * @param top headers to be applied
+ * @param bottom headers to be overwritten
+ * @param revalidation true if revalidation is taking place
*/
-void cache_accept_headers(cache_handle_t *h, request_rec *r,
- int preserve_orig);
+void cache_accept_headers(cache_handle_t *h, request_rec *r, apr_table_t *top,
+ apr_table_t *bottom, int revalidation);
#ifdef __cplusplus
}
diff --git a/modules/cache/cache_util.c b/modules/cache/cache_util.c
index 1e5098d5..7b7fb45c 100644
--- a/modules/cache/cache_util.c
+++ b/modules/cache/cache_util.c
@@ -27,8 +27,6 @@ extern APR_OPTIONAL_FN_TYPE(ap_cache_generate_key) *cache_generate_key;
extern module AP_MODULE_DECLARE_DATA cache_module;
-#define CACHE_SEPARATOR ", "
-
/* Determine if "url" matches the hostname, scheme and port and path
* in "filter". All but the path comparisons are case-insensitive.
*/
@@ -412,9 +410,9 @@ apr_status_t cache_remove_lock(cache_server_conf *conf,
return apr_file_remove(lockname, r->pool);
}
-CACHE_DECLARE(int) ap_cache_check_allowed(cache_request_rec *cache, request_rec *r) {
- const char *cc_req;
- const char *pragma;
+int ap_cache_check_no_cache(cache_request_rec *cache, request_rec *r)
+{
+
cache_server_conf *conf =
(cache_server_conf *)ap_get_module_config(r->server->module_config,
&cache_module);
@@ -429,16 +427,15 @@ CACHE_DECLARE(int) ap_cache_check_allowed(cache_request_rec *cache, request_rec
* - RFC2616 14.9.4 End to end reload, Cache-Control: no-cache, or Pragma:
* no-cache. The server MUST NOT use a cached copy when responding to such
* a request.
- *
- * - RFC2616 14.9.2 What May be Stored by Caches. If Cache-Control:
- * no-store arrives, do not serve from the cache.
*/
/* This value comes from the client's initial request. */
- cc_req = apr_table_get(r->headers_in, "Cache-Control");
- pragma = apr_table_get(r->headers_in, "Pragma");
-
- ap_cache_control(r, &cache->control_in, cc_req, pragma, r->headers_in);
+ if (!cache->control_in.parsed) {
+ const char *cc_req = cache_table_getm(r->pool, r->headers_in,
+ "Cache-Control");
+ const char *pragma = cache_table_getm(r->pool, r->headers_in, "Pragma");
+ ap_cache_control(r, &cache->control_in, cc_req, pragma, r->headers_in);
+ }
if (cache->control_in.no_cache) {
@@ -453,6 +450,32 @@ CACHE_DECLARE(int) ap_cache_check_allowed(cache_request_rec *cache, request_rec
}
}
+ return 1;
+}
+
+int ap_cache_check_no_store(cache_request_rec *cache, request_rec *r)
+{
+
+ cache_server_conf *conf =
+ (cache_server_conf *)ap_get_module_config(r->server->module_config,
+ &cache_module);
+
+ /*
+ * At this point, we may have data cached, but the request may have
+ * specified that cached data may not be used in a response.
+ *
+ * - RFC2616 14.9.2 What May be Stored by Caches. If Cache-Control:
+ * no-store arrives, do not serve from or store to the cache.
+ */
+
+ /* This value comes from the client's initial request. */
+ if (!cache->control_in.parsed) {
+ const char *cc_req = cache_table_getm(r->pool, r->headers_in,
+ "Cache-Control");
+ const char *pragma = cache_table_getm(r->pool, r->headers_in, "Pragma");
+ ap_cache_control(r, &cache->control_in, cc_req, pragma, r->headers_in);
+ }
+
if (cache->control_in.no_store) {
if (!conf->ignorecachecontrol) {
@@ -470,7 +493,6 @@ CACHE_DECLARE(int) ap_cache_check_allowed(cache_request_rec *cache, request_rec
return 1;
}
-
int cache_check_freshness(cache_handle_t *h, cache_request_rec *cache,
request_rec *r)
{
@@ -543,12 +565,12 @@ int cache_check_freshness(cache_handle_t *h, cache_request_rec *cache,
/* These come from the cached entity. */
if (h->cache_obj->info.control.no_cache
- || h->cache_obj->info.control.no_cache_header
- || h->cache_obj->info.control.private_header) {
+ || h->cache_obj->info.control.invalidated) {
/*
* The cached entity contained Cache-Control: no-cache, or a
* no-cache with a header present, or a private with a header
- * present, so treat as stale causing revalidation.
+ * present, or the cached entity has been invalidated in the
+ * past, so treat as stale causing revalidation.
*/
return 0;
}
@@ -858,100 +880,11 @@ CACHE_DECLARE(char *)ap_cache_generate_name(apr_pool_t *p, int dirlevels,
return apr_pstrdup(p, hashfile);
}
-/*
- * Create a new table consisting of those elements from an
- * headers table that are allowed to be stored in a cache.
- */
-CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_headers(apr_pool_t *pool,
- apr_table_t *t,
- server_rec *s)
-{
- cache_server_conf *conf;
- char **header;
- int i;
- apr_table_t *headers_out;
-
- /* Short circuit the common case that there are not
- * (yet) any headers populated.
- */
- if (t == NULL) {
- return apr_table_make(pool, 10);
- };
-
- /* Make a copy of the headers, and remove from
- * the copy any hop-by-hop headers, as defined in Section
- * 13.5.1 of RFC 2616
- */
- headers_out = apr_table_copy(pool, t);
-
- apr_table_unset(headers_out, "Connection");
- apr_table_unset(headers_out, "Keep-Alive");
- apr_table_unset(headers_out, "Proxy-Authenticate");
- apr_table_unset(headers_out, "Proxy-Authorization");
- apr_table_unset(headers_out, "TE");
- apr_table_unset(headers_out, "Trailers");
- apr_table_unset(headers_out, "Transfer-Encoding");
- apr_table_unset(headers_out, "Upgrade");
-
- conf = (cache_server_conf *)ap_get_module_config(s->module_config,
- &cache_module);
-
- /* Remove the user defined headers set with CacheIgnoreHeaders.
- * This may break RFC 2616 compliance on behalf of the administrator.
- */
- header = (char **)conf->ignore_headers->elts;
- for (i = 0; i < conf->ignore_headers->nelts; i++) {
- apr_table_unset(headers_out, header[i]);
- }
- return headers_out;
-}
-
-/*
- * Create a new table consisting of those elements from an input
- * headers table that are allowed to be stored in a cache.
- */
-CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_headers_in(request_rec *r)
-{
- return ap_cache_cacheable_headers(r->pool, r->headers_in, r->server);
-}
-
-/*
- * Create a new table consisting of those elements from an output
- * headers table that are allowed to be stored in a cache;
- * ensure there is a content type and capture any errors.
- */
-CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_headers_out(request_rec *r)
-{
- apr_table_t *headers_out;
-
- headers_out = apr_table_overlay(r->pool, r->headers_out,
- r->err_headers_out);
-
- apr_table_clear(r->err_headers_out);
-
- headers_out = ap_cache_cacheable_headers(r->pool, headers_out,
- r->server);
-
- if (!apr_table_get(headers_out, "Content-Type")
- && r->content_type) {
- apr_table_setn(headers_out, "Content-Type",
- ap_make_content_type(r, r->content_type));
- }
-
- if (!apr_table_get(headers_out, "Content-Encoding")
- && r->content_encoding) {
- apr_table_setn(headers_out, "Content-Encoding",
- r->content_encoding);
- }
-
- return headers_out;
-}
-
/**
* String tokenizer that ignores separator characters within quoted strings
* and escaped characters, as per RFC2616 section 2.2.
*/
-static char *cache_strqtok(char *str, const char *sep, char **last)
+char *cache_strqtok(char *str, const char *sep, char **last)
{
char *token;
int quoted = 0;
@@ -960,6 +893,10 @@ static char *cache_strqtok(char *str, const char *sep, char **last)
str = *last; /* start where we left off */
}
+ if (!str) { /* no more tokens */
+ return NULL;
+ }
+
/* skip characters in sep (will terminate at '\0') */
while (*str && ap_strchr_c(sep, *str)) {
++str;
@@ -979,7 +916,7 @@ static char *cache_strqtok(char *str, const char *sep, char **last)
*last = token;
while (**last) {
if (!quoted) {
- if (**last == '\"') {
+ if (**last == '\"' && !ap_strchr_c(sep, '\"')) {
quoted = 1;
++*last;
}
@@ -1069,9 +1006,7 @@ int ap_cache_control(request_rec *r, cache_control_t *cc,
/* ...then try slowest cases */
else if (!strncasecmp(token, "no-cache", 8)) {
if (token[8] == '=') {
- if (apr_table_get(headers, token + 9)) {
- cc->no_cache_header = 1;
- }
+ cc->no_cache_header = 1;
}
else if (!token[8]) {
cc->no_cache = 1;
@@ -1146,9 +1081,7 @@ int ap_cache_control(request_rec *r, cache_control_t *cc,
}
else if (!strncasecmp(token, "private", 7)) {
if (token[7] == '=') {
- if (apr_table_get(headers, token + 8)) {
- cc->private_header = 1;
- }
+ cc->private_header = 1;
}
else if (!token[7]) {
cc->private = 1;
@@ -1179,3 +1112,209 @@ int ap_cache_control(request_rec *r, cache_control_t *cc,
return (cc_header != NULL || pragma_header != NULL);
}
+
+/**
+ * Parse the Cache-Control header, identifying and removing any response
+ * headers named as arguments to the no-cache and private tokens.
+ */
+static int cache_control_remove(request_rec *r, const char *cc_header,
+ apr_table_t *headers)
+{
+ char *last, *slast;
+ int found = 0;
+
+ if (cc_header) {
+ char *header = apr_pstrdup(r->pool, cc_header);
+ char *token = cache_strqtok(header, CACHE_SEPARATOR, &last);
+ while (token) {
+ switch (token[0]) {
+ case 'n':
+ case 'N': {
+ if (!strncmp(token, "no-cache", 8)
+ || !strncasecmp(token, "no-cache", 8)) {
+ if (token[8] == '=') {
+ const char *header = cache_strqtok(token + 9,
+ CACHE_SEPARATOR "\"", &slast);
+ while (header) {
+ apr_table_unset(headers, header);
+ header = cache_strqtok(NULL, CACHE_SEPARATOR "\"",
+ &slast);
+ }
+ found = 1;
+ }
+ break;
+ }
+ break;
+ }
+ case 'p':
+ case 'P': {
+ if (!strncmp(token, "private", 7)
+ || !strncasecmp(token, "private", 7)) {
+ if (token[7] == '=') {
+ const char *header = cache_strqtok(token + 8,
+ CACHE_SEPARATOR "\"", &slast);
+ while (header) {
+ apr_table_unset(headers, header);
+ header = cache_strqtok(NULL, CACHE_SEPARATOR "\"",
+ &slast);
+ }
+ found = 1;
+ }
+ }
+ break;
+ }
+ }
+ token = cache_strqtok(NULL, CACHE_SEPARATOR, &last);
+ }
+ }
+
+ return found;
+}
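
As a concrete illustration of the function above (header values are hypothetical, and the call only makes sense within this file since the function is static; a request_rec *r is assumed): a response carrying Cache-Control: private="Set-Cookie" loses its Set-Cookie header before the remaining headers are considered cacheable.

/* Sketch: strip the header named by the private= argument. */
apr_table_t *hdrs = apr_table_make(r->pool, 4);
apr_table_setn(hdrs, "Set-Cookie", "session=abc123");
apr_table_setn(hdrs, "Content-Type", "text/html");

if (cache_control_remove(r, "max-age=60, private=\"Set-Cookie\"", hdrs)) {
    /* found == 1: "Set-Cookie" has been unset, "Content-Type" remains */
}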
+
+/*
+ * Create a new table consisting of those elements from a
+ * headers table that are allowed to be stored in a cache.
+ */
+CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_headers(apr_pool_t *pool,
+ apr_table_t *t,
+ server_rec *s)
+{
+ cache_server_conf *conf;
+ char **header;
+ int i;
+ apr_table_t *headers_out;
+
+ /* Short circuit the common case that there are not
+ * (yet) any headers populated.
+ */
+ if (t == NULL) {
+ return apr_table_make(pool, 10);
+ };
+
+ /* Make a copy of the headers, and remove from
+ * the copy any hop-by-hop headers, as defined in Section
+ * 13.5.1 of RFC 2616
+ */
+ headers_out = apr_table_copy(pool, t);
+
+ apr_table_unset(headers_out, "Connection");
+ apr_table_unset(headers_out, "Keep-Alive");
+ apr_table_unset(headers_out, "Proxy-Authenticate");
+ apr_table_unset(headers_out, "Proxy-Authorization");
+ apr_table_unset(headers_out, "TE");
+ apr_table_unset(headers_out, "Trailers");
+ apr_table_unset(headers_out, "Transfer-Encoding");
+ apr_table_unset(headers_out, "Upgrade");
+
+ conf = (cache_server_conf *)ap_get_module_config(s->module_config,
+ &cache_module);
+
+ /* Remove the user defined headers set with CacheIgnoreHeaders.
+ * This may break RFC 2616 compliance on behalf of the administrator.
+ */
+ header = (char **)conf->ignore_headers->elts;
+ for (i = 0; i < conf->ignore_headers->nelts; i++) {
+ apr_table_unset(headers_out, header[i]);
+ }
+ return headers_out;
+}
+
+/*
+ * Create a new table consisting of those elements from an input
+ * headers table that are allowed to be stored in a cache.
+ */
+CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_headers_in(request_rec *r)
+{
+ return ap_cache_cacheable_headers(r->pool, r->headers_in, r->server);
+}
+
+/*
+ * Create a new table consisting of those elements from an output
+ * headers table that are allowed to be stored in a cache;
+ * ensure there is a content type and capture any errors.
+ */
+CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_headers_out(request_rec *r)
+{
+ apr_table_t *headers_out;
+
+ headers_out = apr_table_overlay(r->pool, r->headers_out,
+ r->err_headers_out);
+
+ apr_table_clear(r->err_headers_out);
+
+ headers_out = ap_cache_cacheable_headers(r->pool, headers_out,
+ r->server);
+
+ cache_control_remove(r,
+ cache_table_getm(r->pool, headers_out, "Cache-Control"),
+ headers_out);
+
+ if (!apr_table_get(headers_out, "Content-Type")
+ && r->content_type) {
+ apr_table_setn(headers_out, "Content-Type",
+ ap_make_content_type(r, r->content_type));
+ }
+
+ if (!apr_table_get(headers_out, "Content-Encoding")
+ && r->content_encoding) {
+ apr_table_setn(headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ return headers_out;
+}
+
+typedef struct
+{
+ apr_pool_t *p;
+ const char *first;
+ apr_array_header_t *merged;
+} cache_table_getm_t;
+
+static int cache_table_getm_do(void *v, const char *key, const char *val)
+{
+ cache_table_getm_t *state = (cache_table_getm_t *) v;
+
+ if (!state->first) {
+ /**
+ * The most common case is a single header, and this is covered by
+ * a fast path that doesn't allocate any memory. On the second and
+         * subsequent headers, an array is created and its elements are
+         * concatenated to form the final value.
+ */
+ state->first = val;
+ }
+ else {
+ const char **elt;
+ if (!state->merged) {
+ state->merged = apr_array_make(state->p, 10, sizeof(const char *));
+ elt = apr_array_push(state->merged);
+ *elt = state->first;
+ }
+ elt = apr_array_push(state->merged);
+ *elt = val;
+ }
+ return 1;
+}
+
+const char *cache_table_getm(apr_pool_t *p, const apr_table_t *t,
+ const char *key)
+{
+ cache_table_getm_t state;
+
+ state.p = p;
+ state.first = NULL;
+ state.merged = NULL;
+
+ apr_table_do(cache_table_getm_do, &state, t, key, NULL);
+
+ if (!state.first) {
+ return NULL;
+ }
+ else if (!state.merged) {
+ return state.first;
+ }
+ else {
+ return apr_array_pstrcat(p, state.merged, ',');
+ }
+}
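
To show what the helper above buys over apr_table_get(), which returns only the first matching value, here is a minimal sketch with made-up header values and an assumed request_rec *r:

/* Sketch: repeated header fields are merged into one comma-separated value. */
apr_table_t *t = apr_table_make(r->pool, 2);
apr_table_addn(t, "Cache-Control", "no-cache");
apr_table_addn(t, "Cache-Control", "max-age=0");

const char *cc = cache_table_getm(r->pool, t, "Cache-Control");
/* cc == "no-cache,max-age=0"; apr_table_get() would have returned
 * just "no-cache" */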
diff --git a/modules/cache/cache_util.h b/modules/cache/cache_util.h
index eec38f3a..3a54fadd 100644
--- a/modules/cache/cache_util.h
+++ b/modules/cache/cache_util.h
@@ -99,6 +99,7 @@ extern "C" {
#define CACHE_LOCKNAME_KEY "mod_cache-lockname"
#define CACHE_LOCKFILE_KEY "mod_cache-lockfile"
#define CACHE_CTX_KEY "mod_cache-ctx"
+#define CACHE_SEPARATOR ", "
/**
* cache_util.c
@@ -238,7 +239,16 @@ typedef struct {
* @param r request_rec
* @return 0 ==> cache object may not be served, 1 ==> cache object may be served
*/
-CACHE_DECLARE(int) ap_cache_check_allowed(cache_request_rec *cache, request_rec *r);
+int ap_cache_check_no_cache(cache_request_rec *cache, request_rec *r);
+
+/**
+ * Check whether the request allows a cached object to be stored as per RFC2616
+ * section 14.9.2 (What May be Stored by Caches)
+ * @param cache cache_request_rec
+ * @param r request_rec
+ * @return 0 ==> cache object may not be served, 1 ==> cache object may be served
+ */
+int ap_cache_check_no_store(cache_request_rec *cache, request_rec *r);
/**
* Check the freshness of the cache object per RFC2616 section 13.2 (Expiration Model)
@@ -292,6 +302,25 @@ apr_status_t cache_remove_lock(cache_server_conf *conf,
cache_provider_list *cache_get_providers(request_rec *r,
cache_server_conf *conf, apr_uri_t uri);
+/**
+ * Get a value from a table, where the table may contain multiple
+ * values for a given key.
+ *
+ * When the table contains a single value, that value is returned
+ * unchanged.
+ *
+ * When the table contains two or more values for a key, all values
+ * for the key are returned, separated by commas.
+ */
+const char *cache_table_getm(apr_pool_t *p, const apr_table_t *t,
+ const char *key);
+
+/**
+ * String tokenizer that ignores separator characters within quoted strings
+ * and escaped characters, as per RFC2616 section 2.2.
+ */
+char *cache_strqtok(char *str, const char *sep, char **last);
+
#ifdef __cplusplus
}
#endif
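
A short sketch of the newly exported tokenizer, assuming a pool p: because a double quote only starts a quoted string when it is not itself a separator, a quoted no-cache= or private= argument survives as a single token.

/* Sketch: tokenize a Cache-Control value without splitting quoted strings. */
char *last;
char *line = apr_pstrdup(p, "no-store, private=\"Set-Cookie, Vary\"");
char *token = cache_strqtok(line, CACHE_SEPARATOR, &last);
while (token) {
    /* 1st pass: "no-store"
     * 2nd pass: "private=\"Set-Cookie, Vary\"" (comma kept inside quotes) */
    token = cache_strqtok(NULL, CACHE_SEPARATOR, &last);
}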
diff --git a/modules/cache/config.m4 b/modules/cache/config.m4
index 5647e89b..b9799b76 100644
--- a/modules/cache/config.m4
+++ b/modules/cache/config.m4
@@ -13,17 +13,20 @@ cache_storage.lo dnl
cache_util.lo dnl
"
cache_disk_objs="mod_cache_disk.lo"
+cache_socache_objs="mod_cache_socache.lo"
case "$host" in
*os2*)
# OS/2 DLLs must resolve all symbols at build time
# and we need some from main cache module
cache_disk_objs="$cache_disk_objs mod_cache.la"
+ cache_socache_objs="$cache_socache_objs mod_cache.la"
;;
esac
APACHE_MODULE(cache, dynamic file caching. At least one storage management module (e.g. mod_cache_disk) is also necessary., $cache_objs, , most)
APACHE_MODULE(cache_disk, disk caching module, $cache_disk_objs, , most, , cache)
+APACHE_MODULE(cache_socache, shared object caching module, $cache_socache_objs, , most)
dnl
dnl APACHE_CHECK_DISTCACHE
diff --git a/modules/cache/mod_cache.c b/modules/cache/mod_cache.c
index 4f2d3e04..06bdf460 100644
--- a/modules/cache/mod_cache.c
+++ b/modules/cache/mod_cache.c
@@ -34,6 +34,7 @@ static ap_filter_rec_t *cache_save_subreq_filter_handle;
static ap_filter_rec_t *cache_out_filter_handle;
static ap_filter_rec_t *cache_out_subreq_filter_handle;
static ap_filter_rec_t *cache_remove_url_filter_handle;
+static ap_filter_rec_t *cache_invalidate_filter_handle;
/*
* CACHE handler
@@ -75,11 +76,6 @@ static int cache_quick_handler(request_rec *r, int lookup)
ap_filter_rec_t *cache_out_handle;
cache_server_conf *conf;
- /* Delay initialization until we know we are handling a GET */
- if (r->method_number != M_GET) {
- return DECLINED;
- }
-
conf = (cache_server_conf *) ap_get_module_config(r->server->module_config,
&cache_module);
@@ -106,6 +102,9 @@ static int cache_quick_handler(request_rec *r, int lookup)
/*
* Are we allowed to serve cached info at all?
*/
+ if (!ap_cache_check_no_store(cache, r)) {
+ return DECLINED;
+ }
/* find certain cache controlling headers */
auth = apr_table_get(r->headers_in, "Authorization");
@@ -117,6 +116,40 @@ static int cache_quick_handler(request_rec *r, int lookup)
return DECLINED;
}
+ /* Are we PUT/POST/DELETE? If so, prepare to invalidate the cached entities.
+ */
+ switch (r->method_number) {
+ case M_PUT:
+ case M_POST:
+ case M_DELETE:
+ {
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(02461)
+ "PUT/POST/DELETE: Adding CACHE_INVALIDATE filter for %s",
+ r->uri);
+
+ /* Add cache_invalidate filter to this request to force a
+ * cache entry to be invalidated if the response is
+ * ultimately successful (2xx).
+ */
+ ap_add_output_filter_handle(
+ cache_invalidate_filter_handle, cache, r,
+ r->connection);
+
+ return DECLINED;
+ }
+ case M_GET: {
+ break;
+ }
+ default : {
+
+        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(02462)
+                "cache: Method '%s' not cacheable by mod_cache, ignoring: %s",
+                r->method, r->uri);
+
+ return DECLINED;
+ }
+ }
+
/*
* Try to serve this request from the cache.
*
@@ -176,9 +209,10 @@ static int cache_quick_handler(request_rec *r, int lookup)
* is available later during running the filter may be
* different due to an internal redirect.
*/
- cache->remove_url_filter =
- ap_add_output_filter_handle(cache_remove_url_filter_handle,
- cache, r, r->connection);
+ cache->remove_url_filter = ap_add_output_filter_handle(
+ cache_remove_url_filter_handle, cache, r,
+ r->connection);
+
}
else {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv,
@@ -347,11 +381,6 @@ static int cache_handler(request_rec *r)
ap_filter_rec_t *cache_save_handle;
cache_server_conf *conf;
- /* Delay initialization until we know we are handling a GET */
- if (r->method_number != M_GET) {
- return DECLINED;
- }
-
conf = (cache_server_conf *) ap_get_module_config(r->server->module_config,
&cache_module);
@@ -376,6 +405,47 @@ static int cache_handler(request_rec *r)
cache->providers = providers;
/*
+ * Are we allowed to serve cached info at all?
+ */
+ if (!ap_cache_check_no_store(cache, r)) {
+ return DECLINED;
+ }
+
+ /* Are we PUT/POST/DELETE? If so, prepare to invalidate the cached entities.
+ */
+ switch (r->method_number) {
+ case M_PUT:
+ case M_POST:
+ case M_DELETE:
+ {
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(02463)
+ "PUT/POST/DELETE: Adding CACHE_INVALIDATE filter for %s",
+ r->uri);
+
+ /* Add cache_invalidate filter to this request to force a
+ * cache entry to be invalidated if the response is
+ * ultimately successful (2xx).
+ */
+ ap_add_output_filter_handle(
+ cache_invalidate_filter_handle, cache, r,
+ r->connection);
+
+ return DECLINED;
+ }
+ case M_GET: {
+ break;
+ }
+ default : {
+
+        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(02464)
+                "cache: Method '%s' not cacheable by mod_cache, ignoring: %s",
+                r->method, r->uri);
+
+ return DECLINED;
+ }
+ }
+
+ /*
* Try to serve this request from the cache.
*
* If no existing cache file (DECLINED)
@@ -455,9 +525,10 @@ static int cache_handler(request_rec *r)
* is available later during running the filter may be
* different due to an internal redirect.
*/
- cache->remove_url_filter =
- ap_add_output_filter_handle(cache_remove_url_filter_handle,
- cache, r, r->connection);
+        cache->remove_url_filter = ap_add_output_filter_handle(
+                cache_remove_url_filter_handle, cache, r,
+                r->connection);
}
else {
@@ -665,7 +736,7 @@ static int cache_save_store(ap_filter_t *f, apr_bucket_brigade *in,
*/
ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, f->r, APLOGNO(00766)
"cache: Cache provider's store_body returned an "
- "empty brigade, but didn't consume all of the"
+ "empty brigade, but didn't consume all of the "
"input brigade, standing down to prevent a spin");
ap_remove_output_filter(f);
@@ -682,6 +753,22 @@ static int cache_save_store(ap_filter_t *f, apr_bucket_brigade *in,
return rv;
}
+/**
+ * Sanity check for 304 Not Modified responses, as per RFC2616 Section 10.3.5.
+ */
+static const char *cache_header_cmp(apr_pool_t *pool, apr_table_t *left,
+ apr_table_t *right, const char *key)
+{
+ const char *h1, *h2;
+
+ if ((h1 = cache_table_getm(pool, left, key))
+ && (h2 = cache_table_getm(pool, right, key)) && (strcmp(h1, h2))) {
+ return apr_pstrcat(pool, "contradiction: 304 Not Modified, but ", key,
+ " modified", NULL);
+ }
+ return NULL;
+}
+
/*
* CACHE_SAVE filter
* ---------------
@@ -715,7 +802,7 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
apr_time_t exp, date, lastmod, now;
apr_off_t size = -1;
cache_info *info = NULL;
- char *reason;
+ const char *reason;
apr_pool_t *p;
apr_bucket *e;
apr_table_t *headers;
@@ -857,12 +944,12 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
if (etag == NULL) {
etag = apr_table_get(r->headers_out, "Etag");
}
- cc_out = apr_table_get(r->err_headers_out, "Cache-Control");
- pragma = apr_table_get(r->err_headers_out, "Pragma");
+ cc_out = cache_table_getm(r->pool, r->err_headers_out, "Cache-Control");
+ pragma = cache_table_getm(r->pool, r->err_headers_out, "Pragma");
headers = r->err_headers_out;
if (!cc_out && !pragma) {
- cc_out = apr_table_get(r->headers_out, "Cache-Control");
- pragma = apr_table_get(r->headers_out, "Pragma");
+ cc_out = cache_table_getm(r->pool, r->headers_out, "Cache-Control");
+ pragma = cache_table_getm(r->pool, r->headers_out, "Pragma");
headers = r->headers_out;
}
@@ -871,8 +958,10 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
*/
if (r->status == HTTP_NOT_MODIFIED && cache->stale_handle && !cc_out
&& !pragma) {
- cc_out = apr_table_get(cache->stale_handle->resp_hdrs, "Cache-Control");
- pragma = apr_table_get(cache->stale_handle->resp_hdrs, "Pragma");
+ cc_out = cache_table_getm(r->pool, cache->stale_handle->resp_hdrs,
+ "Cache-Control");
+ pragma = cache_table_getm(r->pool, cache->stale_handle->resp_hdrs,
+ "Pragma");
}
/* Parse the cache control header */
@@ -1000,78 +1089,117 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
/* or we've been asked not to cache it above */
reason = "r->no_cache present";
}
+    else if (cache->stale_handle
+             && APR_DATE_BAD != (date = apr_date_parse_http(
+                     apr_table_get(r->headers_out, "Date")))
+             && date < cache->stale_handle->cache_obj->info.date) {
- /* Hold the phone. Some servers might allow us to cache a 2xx, but
- * then make their 304 responses non cacheable. This leaves us in a
- * sticky position. If the 304 is in answer to our own conditional
- * request, we cannot send this 304 back to the client because the
- * client isn't expecting it. Instead, our only option is to respect
- * the answer to the question we asked (has it changed, answer was
- * no) and return the cached item to the client, and then respect
- * the uncacheable nature of this 304 by allowing the remove_url
- * filter to kick in and remove the cached entity.
- */
- if (reason && r->status == HTTP_NOT_MODIFIED &&
- cache->stale_handle) {
- apr_bucket_brigade *bb;
- apr_bucket *bkt;
- int status;
-
- cache->handle = cache->stale_handle;
- info = &cache->handle->cache_obj->info;
-
- /* Load in the saved status and clear the status line. */
- r->status = info->status;
- r->status_line = NULL;
-
- bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ /**
+ * 13.12 Cache Replacement:
+ *
+ * Note: a new response that has an older Date header value than
+ * existing cached responses is not cacheable.
+ */
+ reason = "updated entity is older than cached entity";
- r->headers_in = cache->stale_headers;
- status = ap_meets_conditions(r);
- if (status != OK) {
- r->status = status;
+ /* while this response is not cacheable, the previous response still is */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00770)
+ "cache: Removing CACHE_REMOVE_URL filter.");
+ ap_remove_output_filter(cache->remove_url_filter);
+ }
+ else if (r->status == HTTP_NOT_MODIFIED && cache->stale_handle) {
+ apr_table_t *left = cache->stale_handle->resp_hdrs;
+ apr_table_t *right = r->headers_out;
- bkt = apr_bucket_flush_create(bb->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, bkt);
+ /* and lastly, contradiction checks for revalidated responses
+ * as per RFC2616 Section 10.3.5
+ */
+ if (((reason = cache_header_cmp(r->pool, left, right, "Allow")))
+ || ((reason = cache_header_cmp(r->pool, left, right,
+ "Content-Encoding")))
+ || ((reason = cache_header_cmp(r->pool, left, right,
+ "Content-Language")))
+ || ((reason = cache_header_cmp(r->pool, left, right,
+ "Content-Length")))
+ || ((reason = cache_header_cmp(r->pool, left, right,
+ "Content-Location")))
+ || ((reason = cache_header_cmp(r->pool, left, right,
+ "Content-MD5")))
+ || ((reason = cache_header_cmp(r->pool, left, right,
+ "Content-Range")))
+ || ((reason = cache_header_cmp(r->pool, left, right,
+ "Content-Type")))
+ || ((reason = cache_header_cmp(r->pool, left, right, "Expires")))
+ || ((reason = cache_header_cmp(r->pool, left, right, "ETag")))
+ || ((reason = cache_header_cmp(r->pool, left, right,
+ "Last-Modified")))) {
+ /* contradiction: 304 Not Modified, but entity header modified */
}
- else {
- /* RFC 2616 10.3.5 states that entity headers are not supposed
- * to be in the 304 response. Therefore, we need to combine the
- * response headers with the cached headers *before* we update
- * the cached headers.
- *
- * However, before doing that, we need to first merge in
- * err_headers_out and we also need to strip any hop-by-hop
- * headers that might have snuck in.
- */
- r->headers_out = ap_cache_cacheable_headers_out(r);
-
- /* Merge in our cached headers. However, keep any updated values. */
- cache_accept_headers(cache->handle, r, 1);
+ }
- cache->provider->recall_body(cache->handle, r->pool, bb);
+ /**
+ * Enforce RFC2616 Section 10.3.5, just in case. We caught any
+ * inconsistencies above.
+ *
+ * If the conditional GET used a strong cache validator (see section
+ * 13.3.3), the response SHOULD NOT include other entity-headers.
+ * Otherwise (i.e., the conditional GET used a weak validator), the
+ * response MUST NOT include other entity-headers; this prevents
+ * inconsistencies between cached entity-bodies and updated headers.
+ */
+ if (r->status == HTTP_NOT_MODIFIED) {
+ apr_table_unset(r->headers_out, "Allow");
+ apr_table_unset(r->headers_out, "Content-Encoding");
+ apr_table_unset(r->headers_out, "Content-Language");
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Content-MD5");
+ apr_table_unset(r->headers_out, "Content-Range");
+ apr_table_unset(r->headers_out, "Content-Type");
+ apr_table_unset(r->headers_out, "Last-Modified");
+ }
- bkt = apr_bucket_eos_create(bb->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, bkt);
- }
+ /* Hold the phone. Some servers might allow us to cache a 2xx, but
+ * then make their 304 responses non cacheable. RFC2616 says this:
+ *
+ * If a 304 response indicates an entity not currently cached, then
+ * the cache MUST disregard the response and repeat the request
+ * without the conditional.
+ *
+ * A 304 response with contradictory headers is technically a
+     * different entity; to be safe, we remove the entity from the cache.
+ */
+ if (reason && r->status == HTTP_NOT_MODIFIED && cache->stale_handle) {
- cache->block_response = 1;
+        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO()
+                "cache: %s responded with an uncacheable 304, "
+                "retrying the request. Reason: %s", r->unparsed_uri, reason);
- /* we've got a cache conditional hit! tell anyone who cares */
- cache_run_cache_status(
- cache->handle,
- r,
- r->headers_out,
- AP_CACHE_REVALIDATE,
- apr_psprintf(
- r->pool,
- "conditional cache hit: 304 was uncacheable though (%s); entity removed",
+ /* we've got a cache conditional miss! tell anyone who cares */
+ cache_run_cache_status(cache->handle, r, r->headers_out, AP_CACHE_MISS,
+ apr_psprintf(r->pool,
+ "conditional cache miss: 304 was uncacheable, entity removed: %s",
reason));
+ /* remove the cached entity immediately, we might cache it again */
+ ap_remove_output_filter(cache->remove_url_filter);
+ cache_remove_url(cache, r);
+
/* let someone else attempt to cache */
cache_remove_lock(conf, cache, r, NULL);
- return ap_pass_brigade(f->next, bb);
+ /* remove this filter from the chain */
+ ap_remove_output_filter(f);
+
+ /* retry without the conditionals */
+ apr_table_unset(r->headers_in, "If-Match");
+ apr_table_unset(r->headers_in, "If-Modified-Since");
+ apr_table_unset(r->headers_in, "If-None-Match");
+ apr_table_unset(r->headers_in, "If-Range");
+ apr_table_unset(r->headers_in, "If-Unmodified-Since");
+
+ ap_internal_redirect(r->uri, r);
+
+ return APR_SUCCESS;
}
if (reason) {
@@ -1186,7 +1314,7 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
if (rv != OK) {
/* we've got a cache miss! tell anyone who cares */
cache_run_cache_status(cache->handle, r, r->headers_out, AP_CACHE_MISS,
- "cache miss: create_entity failed");
+ "cache miss: cache unwilling to store response");
/* Caching layer declined the opportunity to cache the response */
ap_remove_output_filter(f);
@@ -1303,9 +1431,6 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
/* We found a stale entry which wasn't really stale. */
if (cache->stale_handle) {
- /* Load in the saved status and clear the status line. */
- r->status = info->status;
- r->status_line = NULL;
/* RFC 2616 10.3.5 states that entity headers are not supposed
* to be in the 304 response. Therefore, we need to combine the
@@ -1319,7 +1444,9 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
r->headers_out = ap_cache_cacheable_headers_out(r);
/* Merge in our cached headers. However, keep any updated values. */
- cache_accept_headers(cache->handle, r, 1);
+ /* take output, overlay on top of cached */
+ cache_accept_headers(cache->handle, r, r->headers_out,
+ cache->handle->resp_hdrs, 1);
}
/* Write away header information to cache. It is possible that we are
@@ -1342,6 +1469,10 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
apr_bucket *bkt;
int status;
+ /* Load in the saved status and clear the status line. */
+ r->status = info->status;
+ r->status_line = NULL;
+
/* We're just saving response headers, so we are done. Commit
* the response at this point, unless there was a previous error.
*/
@@ -1483,6 +1614,70 @@ static apr_status_t cache_remove_url_filter(ap_filter_t *f,
}
/*
+ * CACHE_INVALIDATE filter
+ * -----------------------
+ *
+ * This filter gets added by the normal and quick handlers should a PUT, POST
+ * or DELETE method be detected. If the response is successful, we must
+ * invalidate any cached entity as per RFC2616 section 13.10.
+ *
+ * CACHE_INVALIDATE has to be a protocol filter to ensure that it is run even if
+ * the response is a canned error message, which removes the content filters
+ * from the chain.
+ *
+ * CACHE_INVALIDATE expects the cache_request_rec within its context because the
+ * request this filter runs on can be different from the one whose cache entry
+ * should be removed, due to internal redirects.
+ */
+static apr_status_t cache_invalidate_filter(ap_filter_t *f,
+ apr_bucket_brigade *in)
+{
+ request_rec *r = f->r;
+ cache_request_rec *cache;
+
+ /* Setup cache_request_rec */
+ cache = (cache_request_rec *) f->ctx;
+
+ if (!cache) {
+ /* user likely configured CACHE_INVALIDATE manually; they should really
+ * use mod_cache configuration to do that. So:
+ * 1. Remove ourselves
+ * 2. Do nothing and bail out
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02465)
+ "cache: CACHE_INVALIDATE enabled unexpectedly: %s", r->uri);
+ }
+ else {
+
+ if (r->status > 299) {
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02466)
+                    "cache: response status to '%s' method is %d (>299), "
+                    "not invalidating cached entity: %s",
+                    r->method, r->status, r->uri);
+
+ }
+ else {
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(02467)
+ "cache: Invalidating all cached entities in response to '%s' request for %s",
+ r->method, r->uri);
+
+ cache_invalidate(cache, r);
+
+ /* we've got a cache invalidate! tell everyone who cares */
+ cache_run_cache_status(cache->handle, r, r->headers_out,
+ AP_CACHE_INVALIDATE, apr_psprintf(r->pool,
+ "cache invalidated by %s", r->method));
+
+ }
+
+ }
+
+ /* remove ourselves */
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, in);
+}
+
+/*
* CACHE filter
* ------------
*
@@ -1579,11 +1774,11 @@ static int cache_status(cache_handle_t *h, request_rec *r,
x_cache = conf->x_cache;
}
if (x_cache) {
- apr_table_setn(headers, "X-Cache",
- apr_psprintf(r->pool, "%s from %s",
- status == AP_CACHE_HIT ? "HIT" : status
- == AP_CACHE_REVALIDATE ? "REVALIDATE" : "MISS",
- r->server->server_hostname));
+        apr_table_setn(headers, "X-Cache", apr_psprintf(r->pool, "%s from %s",
+                status == AP_CACHE_HIT ? "HIT"
+                : status == AP_CACHE_REVALIDATE ? "REVALIDATE"
+                : status == AP_CACHE_INVALIDATE ? "INVALIDATE" : "MISS",
+                r->server->server_hostname));
}
if (dconf && dconf->x_cache_detail_set) {
@@ -1640,7 +1835,8 @@ static void cache_insert_error_filter(request_rec *r)
if (cache->stale_handle && cache->save_filter
&& !cache->stale_handle->cache_obj->info.control.must_revalidate
- && !cache->stale_handle->cache_obj->info.control.proxy_revalidate) {
+ && !cache->stale_handle->cache_obj->info.control.proxy_revalidate
+ && !cache->stale_handle->cache_obj->info.control.s_maxage) {
const char *warn_head;
cache_server_conf
*conf =
@@ -1773,7 +1969,7 @@ static void *merge_dir_config(apr_pool_t *p, void *basev, void *addv) {
static void * create_cache_config(apr_pool_t *p, server_rec *s)
{
- const char *tmppath;
+ const char *tmppath = NULL;
cache_server_conf *ps = apr_pcalloc(p, sizeof(cache_server_conf));
/* array of URL prefixes for which caching is enabled */
@@ -2068,7 +2264,7 @@ static const char *add_cache_disable(cmd_parms *parms, void *dummy,
&cache_module);
if (parms->path) {
- if (!strcmp(url, "on")) {
+ if (!strcasecmp(url, "on")) {
dconf->disable = 1;
dconf->disable_set = 1;
return NULL;
@@ -2454,6 +2650,11 @@ static void register_hooks(apr_pool_t *p)
cache_remove_url_filter,
NULL,
AP_FTYPE_PROTOCOL);
+ cache_invalidate_filter_handle =
+ ap_register_output_filter("CACHE_INVALIDATE",
+ cache_invalidate_filter,
+ NULL,
+ AP_FTYPE_PROTOCOL);
ap_hook_post_config(cache_post_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
}
diff --git a/modules/cache/mod_cache_disk.c b/modules/cache/mod_cache_disk.c
index 8427e8fd..2b50aef9 100644
--- a/modules/cache/mod_cache_disk.c
+++ b/modules/cache/mod_cache_disk.c
@@ -80,7 +80,7 @@ static char *header_file(apr_pool_t *p, disk_cache_conf *conf,
}
if (dobj->prefix) {
- return apr_pstrcat(p, dobj->prefix, CACHE_VDIR_SUFFIX, "/",
+ return apr_pstrcat(p, dobj->prefix, CACHE_VDIR_SUFFIX "/",
dobj->hashfile, CACHE_HEADER_SUFFIX, NULL);
}
else {
@@ -98,7 +98,7 @@ static char *data_file(apr_pool_t *p, disk_cache_conf *conf,
}
if (dobj->prefix) {
- return apr_pstrcat(p, dobj->prefix, CACHE_VDIR_SUFFIX, "/",
+ return apr_pstrcat(p, dobj->prefix, CACHE_VDIR_SUFFIX "/",
dobj->hashfile, CACHE_DATA_SUFFIX, NULL);
}
else {
@@ -385,6 +385,7 @@ static int create_entity(cache_handle_t *h, request_rec *r, const char *key, apr
dobj->root_len = conf->cache_root_len;
apr_pool_create(&pool, r->pool);
+ apr_pool_tag(pool, "mod_cache (create_entity)");
file_cache_create(conf, &dobj->hdrs, pool);
file_cache_create(conf, &dobj->vary, pool);
@@ -511,6 +512,7 @@ static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
dobj->name = key;
apr_pool_create(&pool, r->pool);
+ apr_pool_tag(pool, "mod_cache (open_entity)");
file_cache_create(conf, &dobj->hdrs, pool);
file_cache_create(conf, &dobj->vary, pool);
@@ -841,7 +843,7 @@ static apr_status_t read_table(cache_handle_t *handle, request_rec *r,
}
*l++ = '\0';
- while (*l && apr_isspace(*l)) {
+ while (apr_isspace(*l)) {
++l;
}
@@ -940,6 +942,10 @@ static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info
dobj->headers_in = ap_cache_cacheable_headers_in(r);
}
+ if (r->header_only && r->status != HTTP_NOT_MODIFIED) {
+ dobj->disk_info.header_only = 1;
+ }
+
return APR_SUCCESS;
}
@@ -1188,49 +1194,51 @@ static apr_status_t store_body(cache_handle_t *h, request_rec *r,
continue;
}
- /* Attempt to create the data file at the last possible moment, if
- * the body is empty, we don't write a file at all, and save an inode.
- */
- if (!dobj->data.tempfd) {
- apr_finfo_t finfo;
- rv = apr_file_mktemp(&dobj->data.tempfd, dobj->data.tempfile,
- APR_CREATE | APR_WRITE | APR_BINARY |
- APR_BUFFERED | APR_EXCL, dobj->data.pool);
+ if (!dobj->disk_info.header_only) {
+
+ /* Attempt to create the data file at the last possible moment, if
+ * the body is empty, we don't write a file at all, and save an inode.
+ */
+ if (!dobj->data.tempfd) {
+ apr_finfo_t finfo;
+ rv = apr_file_mktemp(&dobj->data.tempfd, dobj->data.tempfile,
+ APR_CREATE | APR_WRITE | APR_BINARY | APR_BUFFERED
+ | APR_EXCL, dobj->data.pool);
+ if (rv != APR_SUCCESS) {
+ apr_pool_destroy(dobj->data.pool);
+ return rv;
+ }
+ dobj->file_size = 0;
+ rv = apr_file_info_get(&finfo, APR_FINFO_IDENT,
+ dobj->data.tempfd);
+ if (rv != APR_SUCCESS) {
+ apr_pool_destroy(dobj->data.pool);
+ return rv;
+ }
+ dobj->disk_info.device = finfo.device;
+ dobj->disk_info.inode = finfo.inode;
+ dobj->disk_info.has_body = 1;
+ }
+
+ /* write to the cache, leave if we fail */
+ rv = apr_file_write_full(dobj->data.tempfd, str, length, &written);
if (rv != APR_SUCCESS) {
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00731)
+                        "Error when writing cache file for URL %s",
+                        h->cache_obj->key);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
return rv;
}
- dobj->file_size = 0;
- rv = apr_file_info_get(&finfo, APR_FINFO_IDENT,
- dobj->data.tempfd);
- if (rv != APR_SUCCESS) {
+ dobj->file_size += written;
+ if (dobj->file_size > dconf->maxfs) {
+                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00732)
+                        "URL %s failed the size check "
+                        "(%" APR_OFF_T_FMT ">%" APR_OFF_T_FMT ")",
+                        h->cache_obj->key, dobj->file_size, dconf->maxfs);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
- return rv;
+ return APR_EGENERAL;
}
- dobj->disk_info.device = finfo.device;
- dobj->disk_info.inode = finfo.inode;
- dobj->disk_info.has_body = 1;
- }
- /* write to the cache, leave if we fail */
- rv = apr_file_write_full(dobj->data.tempfd, str, length, &written);
- if (rv != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00731)
- "Error when writing cache file for URL %s",
- h->cache_obj->key);
- /* Remove the intermediate cache file and return non-APR_SUCCESS */
- apr_pool_destroy(dobj->data.pool);
- return rv;
- }
- dobj->file_size += written;
- if (dobj->file_size > dconf->maxfs) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00732)
- "URL %s failed the size check "
- "(%" APR_OFF_T_FMT ">%" APR_OFF_T_FMT ")",
- h->cache_obj->key, dobj->file_size, dconf->maxfs);
- /* Remove the intermediate cache file and return non-APR_SUCCESS */
- apr_pool_destroy(dobj->data.pool);
- return APR_EGENERAL;
}
/* have we reached the limit of how much we're prepared to write in one
@@ -1256,43 +1264,44 @@ static apr_status_t store_body(cache_handle_t *h, request_rec *r,
if (seen_eos) {
const char *cl_header = apr_table_get(r->headers_out, "Content-Length");
- if (dobj->data.tempfd) {
- rv = apr_file_close(dobj->data.tempfd);
- if (rv != APR_SUCCESS) {
- /* Buffered write failed, abandon attempt to write */
- apr_pool_destroy(dobj->data.pool);
- return rv;
+ if (!dobj->disk_info.header_only) {
+
+ if (dobj->data.tempfd) {
+ rv = apr_file_close(dobj->data.tempfd);
+ if (rv != APR_SUCCESS) {
+ /* Buffered write failed, abandon attempt to write */
+ apr_pool_destroy(dobj->data.pool);
+ return rv;
+ }
}
- }
- if (r->connection->aborted || r->no_cache) {
- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00733)
- "Discarding body for URL %s "
- "because connection has been aborted.",
- h->cache_obj->key);
- /* Remove the intermediate cache file and return non-APR_SUCCESS */
- apr_pool_destroy(dobj->data.pool);
- return APR_EGENERAL;
- }
- if (dobj->file_size < dconf->minfs) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00734)
- "URL %s failed the size check "
- "(%" APR_OFF_T_FMT "<%" APR_OFF_T_FMT ")",
- h->cache_obj->key, dobj->file_size, dconf->minfs);
- /* Remove the intermediate cache file and return non-APR_SUCCESS */
- apr_pool_destroy(dobj->data.pool);
- return APR_EGENERAL;
- }
- if (cl_header) {
- apr_int64_t cl = apr_atoi64(cl_header);
- if ((errno == 0) && (dobj->file_size != cl)) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00735)
- "URL %s didn't receive complete response, not caching",
- h->cache_obj->key);
+ if (r->connection->aborted || r->no_cache) {
+                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00733)
+                        "Discarding body for URL %s "
+                        "because connection has been aborted.",
+                        h->cache_obj->key);
/* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
return APR_EGENERAL;
}
+ if (dobj->file_size < dconf->minfs) {
+                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00734)
+                        "URL %s failed the size check "
+                        "(%" APR_OFF_T_FMT "<%" APR_OFF_T_FMT ")",
+                        h->cache_obj->key, dobj->file_size, dconf->minfs);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ apr_pool_destroy(dobj->data.pool);
+ return APR_EGENERAL;
+ }
+ if (cl_header) {
+ apr_int64_t cl = apr_atoi64(cl_header);
+ if ((errno == 0) && (dobj->file_size != cl)) {
+                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00735)
+                            "URL %s didn't receive complete response, "
+                            "not caching", h->cache_obj->key);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ apr_pool_destroy(dobj->data.pool);
+ return APR_EGENERAL;
+ }
+ }
+
}
/* All checks were fine, we're good to go when the commit comes */
@@ -1319,7 +1328,12 @@ static apr_status_t commit_entity(cache_handle_t *h, request_rec *r)
rv = file_cache_el_final(conf, &dobj->vary, r);
}
if (APR_SUCCESS == rv) {
- rv = file_cache_el_final(conf, &dobj->data, r);
+ if (!dobj->disk_info.header_only) {
+ rv = file_cache_el_final(conf, &dobj->data, r);
+ }
+ else if (dobj->data.file){
+ rv = apr_file_remove(dobj->data.file, dobj->data.pool);
+ }
}
/* remove the cached items completely on any failure */
@@ -1342,7 +1356,17 @@ static apr_status_t commit_entity(cache_handle_t *h, request_rec *r)
static apr_status_t invalidate_entity(cache_handle_t *h, request_rec *r)
{
- return APR_ENOTIMPL;
+ apr_status_t rv;
+
+ rv = recall_headers(h, r);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* mark the entity as invalidated */
+ h->cache_obj->info.control.invalidated = 1;
+
+ return commit_entity(h, r);
}
static void *create_dir_config(apr_pool_t *p, char *dummy)
@@ -1447,6 +1471,7 @@ static const char
{
return "CacheMinFileSize argument must be a non-negative integer representing the min size of a file to cache in bytes.";
}
+ dconf->minfs_set = 1;
return NULL;
}
@@ -1460,6 +1485,7 @@ static const char
{
return "CacheMaxFileSize argument must be a non-negative integer representing the max size of a file to cache in bytes.";
}
+ dconf->maxfs_set = 1;
return NULL;
}
diff --git a/modules/cache/mod_cache_socache.c b/modules/cache/mod_cache_socache.c
new file mode 100644
index 00000000..913de2ee
--- /dev/null
+++ b/modules/cache/mod_cache_socache.c
@@ -0,0 +1,1501 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_lib.h"
+#include "apr_file_io.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+#include "http_core.h"
+#include "ap_provider.h"
+#include "ap_socache.h"
+#include "util_filter.h"
+#include "util_script.h"
+#include "util_charset.h"
+#include "util_mutex.h"
+
+#include "mod_cache.h"
+
+#include "cache_socache_common.h"
+
+/*
+ * mod_cache_socache: Shared Object Cache Based HTTP 1.1 Cache.
+ *
+ * Flow to Find the entry:
+ * Incoming client requests URI /foo/bar/baz
+ * Fetch URI key (may contain Format #1 or Format #2)
+ * If format #1 (Contains a list of Vary Headers):
+ * Use each header name (from .header) with our request values (headers_in) to
+ * regenerate key using HeaderName+HeaderValue+.../foo/bar/baz
+ * re-read in key (must be format #2)
+ *
+ * Format #1:
+ * apr_uint32_t format;
+ * apr_time_t expire;
+ * apr_array_t vary_headers (delimited by CRLF)
+ *
+ * Format #2:
+ * cache_socache_info_t (first sizeof(apr_uint32_t) bytes is the format)
+ * entity name (sobj->name) [length is in cache_socache_info_t->name_len]
+ * r->headers_out (delimited by CRLF)
+ * CRLF
+ * r->headers_in (delimited by CRLF)
+ * CRLF
+ */
+
+module AP_MODULE_DECLARE_DATA cache_socache_module;
+
+/*
+ * cache_socache_object_t
+ * Pointed to by cache_object_t::vobj
+ */
+typedef struct cache_socache_object_t
+{
+ apr_pool_t *pool; /* pool */
+ unsigned char *buffer; /* the cache buffer */
+ apr_size_t buffer_len; /* size of the buffer */
+ apr_bucket_brigade *body; /* brigade containing the body, if any */
+ apr_table_t *headers_in; /* Input headers to save */
+ apr_table_t *headers_out; /* Output headers to save */
+ cache_socache_info_t socache_info; /* Header information. */
+ apr_size_t body_offset; /* offset to the start of the body */
+ unsigned int newbody :1; /* whether a new body is present */
+ apr_time_t expire; /* when to expire the entry */
+
+ const char *name; /* Requested URI without vary bits - suitable for mortals. */
+ const char *key; /* On-disk prefix; URI with Vary bits (if present) */
+ apr_off_t file_size; /* File size of the cached data file */
+ apr_off_t offset; /* Max size to set aside */
+ apr_time_t timeout; /* Max time to set aside */
+ unsigned int done :1; /* Is the attempt to cache complete? */
+} cache_socache_object_t;
+
+/*
+ * mod_cache_socache configuration
+ */
+#define DEFAULT_MAX_FILE_SIZE 100*1024
+#define DEFAULT_MAXTIME 86400
+#define DEFAULT_MINTIME 600
+#define DEFAULT_READSIZE 0
+#define DEFAULT_READTIME 0
+
+typedef struct cache_socache_provider_conf
+{
+ const char *args;
+ ap_socache_provider_t *socache_provider;
+ ap_socache_instance_t *socache_instance;
+} cache_socache_provider_conf;
+
+typedef struct cache_socache_conf
+{
+ cache_socache_provider_conf *provider;
+} cache_socache_conf;
+
+typedef struct cache_socache_dir_conf
+{
+ apr_off_t max; /* maximum file size for cached files */
+ apr_time_t maxtime; /* maximum expiry time */
+ apr_time_t mintime; /* minimum expiry time */
+ apr_off_t readsize; /* maximum data to attempt to cache in one go */
+ apr_time_t readtime; /* maximum time taken to cache in one go */
+ unsigned int max_set :1;
+ unsigned int maxtime_set :1;
+ unsigned int mintime_set :1;
+ unsigned int readsize_set :1;
+ unsigned int readtime_set :1;
+} cache_socache_dir_conf;
+
+/* Shared object cache and mutex */
+static const char * const cache_socache_id = "cache-socache";
+static apr_global_mutex_t *socache_mutex = NULL;
+
+/*
+ * Local static functions
+ */
+
+static apr_status_t read_array(request_rec *r, apr_array_header_t *arr,
+ unsigned char *buffer, apr_size_t buffer_len, apr_size_t *slider)
+{
+ apr_size_t val = *slider;
+
+ while (*slider < buffer_len) {
+ if (buffer[*slider] == '\r') {
+ if (val == *slider) {
+ (*slider)++;
+ return APR_SUCCESS;
+ }
+ *((const char **) apr_array_push(arr)) = apr_pstrndup(r->pool,
+ (const char *) buffer + val, *slider - val);
+ (*slider)++;
+ if (buffer[*slider] == '\n') {
+ (*slider)++;
+ }
+ val = *slider;
+ }
+ else if (buffer[*slider] == '\0') {
+ (*slider)++;
+ return APR_SUCCESS;
+ }
+ else {
+ (*slider)++;
+ }
+ }
+
+ return APR_EOF;
+}
+
+static apr_status_t store_array(apr_array_header_t *arr, unsigned char *buffer,
+ apr_size_t buffer_len, apr_size_t *slider)
+{
+ int i, len;
+ const char **elts;
+
+ elts = (const char **) arr->elts;
+
+ for (i = 0; i < arr->nelts; i++) {
+ len = strlen(elts[i]);
+ if (len + 3 >= buffer_len - *slider) {
+ return APR_EOF;
+ }
+ len = apr_snprintf(buffer ? (char *) buffer + *slider : NULL,
+ buffer ? buffer_len - *slider : 0, "%s" CRLF, elts[i]);
+ *slider += len;
+ }
+ if (buffer) {
+ memcpy(buffer + *slider, CRLF, sizeof(CRLF) - 1);
+ }
+ *slider += sizeof(CRLF) - 1;
+
+ return APR_SUCCESS;
+}
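
For clarity, a hedged sketch of the array serialisation used above, with hypothetical variables varray, buffer and buffer_len: each element is written followed by CRLF, and an empty line terminates the array, which is exactly what read_array() stops on.

/* Sketch: { "Accept-Encoding", "User-Agent" } serialises to
 * "Accept-Encoding" CRLF "User-Agent" CRLF CRLF. */
apr_size_t slider = 0;
if (store_array(varray, buffer, buffer_len, &slider) == APR_SUCCESS) {
    /* slider now indexes just past the terminating CRLF */
}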
+
+static apr_status_t read_table(cache_handle_t *handle, request_rec *r,
+ apr_table_t *table, unsigned char *buffer, apr_size_t buffer_len,
+ apr_size_t *slider)
+{
+    apr_size_t key = *slider, colon = 0, len = 0;
+
+ while (*slider < buffer_len) {
+ if (buffer[*slider] == ':') {
+ if (!colon) {
+ colon = *slider;
+ }
+ (*slider)++;
+ }
+ else if (buffer[*slider] == '\r') {
+ len = colon;
+ if (key == *slider) {
+ (*slider)++;
+ if (buffer[*slider] == '\n') {
+ (*slider)++;
+ }
+ return APR_SUCCESS;
+ }
+ if (!colon || buffer[colon++] != ':') {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02344)
+ "Premature end of cache headers.");
+ return APR_EGENERAL;
+ }
+ while (apr_isspace(buffer[colon])) {
+ colon++;
+ }
+ apr_table_addn(table, apr_pstrndup(r->pool, (const char *) buffer
+ + key, len - key), apr_pstrndup(r->pool,
+ (const char *) buffer + colon, *slider - colon));
+ (*slider)++;
+ if (buffer[*slider] == '\n') {
+ (*slider)++;
+ }
+ key = *slider;
+ colon = 0;
+ }
+ else if (buffer[*slider] == '\0') {
+ (*slider)++;
+ return APR_SUCCESS;
+ }
+ else {
+ (*slider)++;
+ }
+ }
+
+ return APR_EOF;
+}
+
+static apr_status_t store_table(apr_table_t *table, unsigned char *buffer,
+ apr_size_t buffer_len, apr_size_t *slider)
+{
+ int i, len;
+ apr_table_entry_t *elts;
+
+ elts = (apr_table_entry_t *) apr_table_elts(table)->elts;
+ for (i = 0; i < apr_table_elts(table)->nelts; ++i) {
+ if (elts[i].key != NULL) {
+ apr_size_t key_len = strlen(elts[i].key);
+ apr_size_t val_len = strlen(elts[i].val);
+ if (key_len + val_len + 5 >= buffer_len - *slider) {
+ return APR_EOF;
+ }
+ len = apr_snprintf(buffer ? (char *) buffer + *slider : NULL,
+ buffer ? buffer_len - *slider : 0, "%s: %s" CRLF,
+ elts[i].key, elts[i].val);
+ *slider += len;
+ }
+ }
+ if (3 >= buffer_len - *slider) {
+ return APR_EOF;
+ }
+ if (buffer) {
+ memcpy(buffer + *slider, CRLF, sizeof(CRLF) - 1);
+ }
+ *slider += sizeof(CRLF) - 1;
+
+ return APR_SUCCESS;
+}
+
+static const char* regen_key(apr_pool_t *p, apr_table_t *headers,
+ apr_array_header_t *varray, const char *oldkey)
+{
+ struct iovec *iov;
+ int i, k;
+ int nvec;
+ const char *header;
+ const char **elts;
+
+ nvec = (varray->nelts * 2) + 1;
+ iov = apr_palloc(p, sizeof(struct iovec) * nvec);
+ elts = (const char **) varray->elts;
+
+ /* TODO:
+ * - Handle multiple-value headers better. (sort them?)
+     * - Handle case-insensitive values better.
+ * This isn't the end of the world, since it just lowers the cache
+ * hit rate, but it would be nice to fix.
+ *
+     * The majority are case insensitive if they are values (encoding etc).
+ * Most of rfc2616 is case insensitive on header contents.
+ *
+ * So the better solution may be to identify headers which should be
+ * treated case-sensitive?
+     * HTTP URIs (3.2.3) [host and scheme are insensitive]
+ * HTTP method (5.1.1)
+ * HTTP-date values (3.3.1)
+     * 3.7 Media Types [excerpt]
+ * The type, subtype, and parameter attribute names are case-
+ * insensitive. Parameter values might or might not be case-sensitive,
+ * depending on the semantics of the parameter name.
+     * 14.20 Expect [excerpt]
+ * Comparison of expectation values is case-insensitive for unquoted
+ * tokens (including the 100-continue token), and is case-sensitive for
+ * quoted-string expectation-extensions.
+ */
+
+ for (i = 0, k = 0; i < varray->nelts; i++) {
+ header = apr_table_get(headers, elts[i]);
+ if (!header) {
+ header = "";
+ }
+ iov[k].iov_base = (char*) elts[i];
+ iov[k].iov_len = strlen(elts[i]);
+ k++;
+ iov[k].iov_base = (char*) header;
+ iov[k].iov_len = strlen(header);
+ k++;
+ }
+ iov[k].iov_base = (char*) oldkey;
+ iov[k].iov_len = strlen(oldkey);
+ k++;
+
+ return apr_pstrcatv(p, iov, k, NULL);
+}
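
A worked example of the key regeneration above (the header value and original key are hypothetical): with Vary: Accept-Encoding and a request sending Accept-Encoding: gzip, the vary-aware key is the header name, its value and the original key concatenated back to back.

/* Sketch: varray == { "Accept-Encoding" },
 * headers contains "Accept-Encoding: gzip" */
const char *nkey = regen_key(r->pool, r->headers_in, varray,
                             "http://example.com:80/foo/bar/baz?");
/* nkey == "Accept-Encodinggziphttp://example.com:80/foo/bar/baz?" */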
+
+static int array_alphasort(const void *fn1, const void *fn2)
+{
+ return strcmp(*(char**) fn1, *(char**) fn2);
+}
+
+static void tokens_to_array(apr_pool_t *p, const char *data,
+ apr_array_header_t *arr)
+{
+ char *token;
+
+ while ((token = ap_get_list_item(p, &data)) != NULL) {
+ *((const char **) apr_array_push(arr)) = token;
+ }
+
+ /* Sort it so that "Vary: A, B" and "Vary: B, A" are stored the same. */
+ qsort((void *) arr->elts, arr->nelts, sizeof(char *), array_alphasort);
+}
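
A quick sketch of the normalisation above, with a made-up Vary value and an assumed request_rec *r: the tokens are split on list separators and sorted, so header order in Vary no longer affects the key.

/* Sketch: "User-Agent, Accept-Encoding" and "Accept-Encoding, User-Agent"
 * both normalise to the same sorted array. */
apr_array_header_t *varray = apr_array_make(r->pool, 4, sizeof(char *));
tokens_to_array(r->pool, "User-Agent, Accept-Encoding", varray);
/* varray->elts == { "Accept-Encoding", "User-Agent" } */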
+
+/*
+ * Hook and mod_cache callback functions
+ */
+static int create_entity(cache_handle_t *h, request_rec *r, const char *key,
+ apr_off_t len, apr_bucket_brigade *bb)
+{
+ cache_socache_dir_conf *dconf =
+ ap_get_module_config(r->per_dir_config, &cache_socache_module);
+ cache_socache_conf *conf = ap_get_module_config(r->server->module_config,
+ &cache_socache_module);
+ cache_object_t *obj;
+ cache_socache_object_t *sobj;
+ apr_size_t total;
+
+ if (conf->provider == NULL) {
+ return DECLINED;
+ }
+
+ /* we don't support caching of range requests (yet) */
+ /* TODO: but we could */
+ if (r->status == HTTP_PARTIAL_CONTENT) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02345)
+ "URL %s partial content response not cached",
+ key);
+ return DECLINED;
+ }
+
+ /*
+ * We have a chicken and egg problem. We don't know until we
+ * attempt to store_headers just how big the response will be
+ * and whether it will fit in the cache limits set. But we
+ * need to make a decision now as to whether we plan to try.
+ * If we make the wrong decision, we could prevent another
+ * cache implementation, such as cache_disk, from getting the
+ * opportunity to cache, and that would be unfortunate.
+ *
+ * In a series of tests, from cheapest to most expensive,
+ * decide whether or not to ignore this attempt to cache,
+ * with a small margin just to be sure.
+ */
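+    /*
+     * For example (hypothetical numbers): with CacheSocacheMaxSize set to
+     * 100KB, a 90KB body whose key and serialised request/response headers
+     * add another 12KB is rejected below, even though the body on its own
+     * would fit.
+     */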
+ if (len < 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02346)
+ "URL '%s' had no explicit size, ignoring", key);
+ return DECLINED;
+ }
+ if (len > dconf->max) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02347)
+ "URL '%s' body larger than limit, ignoring "
+ "(%" APR_OFF_T_FMT " > %" APR_OFF_T_FMT ")",
+ key, len, dconf->max);
+ return DECLINED;
+ }
+
+ /* estimate the total cached size, given current headers */
+ total = len + sizeof(cache_socache_info_t) + strlen(key);
+ if (APR_SUCCESS != store_table(r->headers_out, NULL, dconf->max, &total)
+ || APR_SUCCESS != store_table(r->headers_in, NULL, dconf->max,
+ &total)) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02348)
+ "URL '%s' estimated headers size larger than limit, ignoring "
+ "(%" APR_SIZE_T_FMT " > %" APR_OFF_T_FMT ")",
+ key, total, dconf->max);
+ return DECLINED;
+ }
+
+ if (total >= dconf->max) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02349)
+ "URL '%s' body and headers larger than limit, ignoring "
+ "(%" APR_OFF_T_FMT " > %" APR_OFF_T_FMT ")",
+ key, len, dconf->max);
+ return DECLINED;
+ }
+
+ /* Allocate and initialize cache_object_t and cache_socache_object_t */
+ h->cache_obj = obj = apr_pcalloc(r->pool, sizeof(*obj));
+ obj->vobj = sobj = apr_pcalloc(r->pool, sizeof(*sobj));
+
+ obj->key = apr_pstrdup(r->pool, key);
+ sobj->key = obj->key;
+ sobj->name = obj->key;
+
+ return OK;
+}
+
+static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
+{
+ cache_socache_dir_conf *dconf =
+ ap_get_module_config(r->per_dir_config, &cache_socache_module);
+ cache_socache_conf *conf = ap_get_module_config(r->server->module_config,
+ &cache_socache_module);
+ apr_uint32_t format;
+ apr_size_t slider;
+ unsigned int buffer_len;
+ const char *nkey;
+ apr_status_t rc;
+ cache_object_t *obj;
+ cache_info *info;
+ cache_socache_object_t *sobj;
+ apr_size_t len;
+
+ nkey = NULL;
+ h->cache_obj = NULL;
+
+ if (!conf->provider || !conf->provider->socache_instance) {
+ return DECLINED;
+ }
+
+ /* Create and init the cache object */
+ obj = apr_pcalloc(r->pool, sizeof(cache_object_t));
+ sobj = apr_pcalloc(r->pool, sizeof(cache_socache_object_t));
+
+ info = &(obj->info);
+
+ /* Create a temporary pool for the buffer, and destroy it if something
+ * goes wrong so we don't have large buffers of unused memory hanging
+ * about for the lifetime of the response.
+ */
+ apr_pool_create(&sobj->pool, r->pool);
+
+ sobj->buffer = apr_palloc(sobj->pool, dconf->max + 1);
+ sobj->buffer_len = dconf->max + 1;
+
+ /* attempt to retrieve the cached entry */
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_lock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02350)
+ "could not acquire lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ }
+ buffer_len = sobj->buffer_len;
+ rc = conf->provider->socache_provider->retrieve(
+ conf->provider->socache_instance, r->server, (unsigned char *) key,
+ strlen(key), sobj->buffer, &buffer_len, r->pool);
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_unlock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02351)
+ "could not release lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ }
+ if (rc != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rc, r, APLOGNO(02352)
+ "Key not found in cache: %s", key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ if (buffer_len >= sobj->buffer_len) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rc, r, APLOGNO(02353)
+ "Key found in cache but too big, ignoring: %s", key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+
+ /* read the format from the cache file */
+ memcpy(&format, sobj->buffer, sizeof(format));
+ slider = sizeof(format);
+
+ if (format == CACHE_SOCACHE_VARY_FORMAT_VERSION) {
+ apr_array_header_t* varray;
+ apr_time_t expire;
+
+ memcpy(&expire, sobj->buffer + slider, sizeof(expire));
+ slider += sizeof(expire);
+
+ varray = apr_array_make(r->pool, 5, sizeof(char*));
+ rc = read_array(r, varray, sobj->buffer, buffer_len, &slider);
+ if (rc != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(02354)
+ "Cannot parse vary entry for key: %s", key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+
+ nkey = regen_key(r->pool, r->headers_in, varray, key);
+
+ /* attempt to retrieve the cached entry */
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_lock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02355)
+ "could not acquire lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ }
+ buffer_len = sobj->buffer_len;
+ rc = conf->provider->socache_provider->retrieve(
+ conf->provider->socache_instance, r->server,
+ (unsigned char *) nkey, strlen(nkey), sobj->buffer,
+ &buffer_len, r->pool);
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_unlock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02356)
+ "could not release lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ }
+ if (rc != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rc, r, APLOGNO(02357)
+ "Key not found in cache: %s", key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ if (buffer_len >= sobj->buffer_len) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rc, r, APLOGNO(02358)
+ "Key found in cache but too big, ignoring: %s", key);
+ goto fail;
+ }
+
+ }
+ else if (format != CACHE_SOCACHE_DISK_FORMAT_VERSION) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02359)
+ "Key '%s' found in cache has version %d, expected %d, ignoring",
+ key, format, CACHE_SOCACHE_DISK_FORMAT_VERSION);
+ goto fail;
+ }
+ else {
+ nkey = key;
+ }
+
+ obj->key = nkey;
+ sobj->key = nkey;
+ sobj->name = key;
+
+ if (buffer_len >= sizeof(cache_socache_info_t)) {
+ memcpy(&sobj->socache_info, sobj->buffer, sizeof(cache_socache_info_t));
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(02360)
+ "Cache entry for key '%s' too short, removing", nkey);
+ goto fail;
+ }
+ slider = sizeof(cache_socache_info_t);
+
+ /* Store it away so we can get it later. */
+ info->status = sobj->socache_info.status;
+ info->date = sobj->socache_info.date;
+ info->expire = sobj->socache_info.expire;
+ info->request_time = sobj->socache_info.request_time;
+ info->response_time = sobj->socache_info.response_time;
+
+ memcpy(&info->control, &sobj->socache_info.control, sizeof(cache_control_t));
+
+ if (sobj->socache_info.name_len <= buffer_len - slider) {
+ if (strncmp((const char *) sobj->buffer + slider, sobj->name,
+ sobj->socache_info.name_len)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(02361)
+ "Cache entry for key '%s' URL mismatch, ignoring", nkey);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ slider += sobj->socache_info.name_len;
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(02362)
+ "Cache entry for key '%s' too short, removing", nkey);
+ goto fail;
+ }
+
+ /* Is this a cached HEAD request? */
+ if (sobj->socache_info.header_only && !r->header_only) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(02363)
+ "HEAD request cached, non-HEAD requested, ignoring: %s",
+ sobj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+
+ h->req_hdrs = apr_table_make(r->pool, 20);
+ h->resp_hdrs = apr_table_make(r->pool, 20);
+
+ /* Call routine to read the header lines/status line */
+ if (APR_SUCCESS != read_table(h, r, h->resp_hdrs, sobj->buffer, buffer_len,
+ &slider)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(02364)
+ "Cache entry for key '%s' response headers unreadable, removing", nkey);
+ goto fail;
+ }
+ if (APR_SUCCESS != read_table(h, r, h->req_hdrs, sobj->buffer, buffer_len,
+ &slider)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(02365)
+ "Cache entry for key '%s' request headers unreadable, removing", nkey);
+ goto fail;
+ }
+
+ /* Retrieve the body if we have one */
+ sobj->body = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ len = buffer_len - slider;
+
+ /*
+ * Optimisation: if the body is small, we want to make a
+ * copy of the body and free the temporary pool, as we
+ * don't want large blocks of unused memory hanging around
+ * to the end of the response. In contrast, if the body is
+ * large, we would rather leave the body where it is in the
+ * temporary pool, and save ourselves the copy.
+ */
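+    /*
+     * For example (hypothetical sizes): with CacheSocacheMaxSize set to
+     * 100KB, a 4KB body is copied into the brigade and the roughly 100KB
+     * temporary buffer is freed at once, while an 80KB body is served
+     * directly from the buffer, which then survives to the end of the
+     * response.
+     */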
+ if (len * 2 > dconf->max) {
+ apr_bucket *e;
+
+ /* large - use the brigade as is, we're done */
+ e = apr_bucket_immortal_create((const char *) sobj->buffer + slider,
+ len, r->connection->bucket_alloc);
+
+ APR_BRIGADE_INSERT_TAIL(sobj->body, e);
+ }
+ else {
+
+ /* small - make a copy of the data... */
+ apr_brigade_write(sobj->body, NULL, NULL, (const char *) sobj->buffer
+ + slider, len);
+
+ /* ...and get rid of the large memory buffer */
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ }
+
+ /* make the configuration stick */
+ h->cache_obj = obj;
+ obj->vobj = sobj;
+
+ return OK;
+
+fail:
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_lock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02366)
+ "could not acquire lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ }
+ conf->provider->socache_provider->remove(
+ conf->provider->socache_instance, r->server,
+ (unsigned char *) nkey, strlen(nkey), r->pool);
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_unlock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02367)
+ "could not release lock, ignoring: %s", obj->key);
+ }
+ }
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+}
+
+static int remove_entity(cache_handle_t *h)
+{
+ /* Null out the cache object pointer so next time we start from scratch */
+ h->cache_obj = NULL;
+ return OK;
+}
+
+static int remove_url(cache_handle_t *h, request_rec *r)
+{
+ cache_socache_conf *conf = ap_get_module_config(r->server->module_config,
+ &cache_socache_module);
+ cache_socache_object_t *sobj;
+
+ sobj = (cache_socache_object_t *) h->cache_obj->vobj;
+ if (!sobj) {
+ return DECLINED;
+ }
+
+ /* Remove the key from the cache */
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_lock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02368)
+ "could not acquire lock, ignoring: %s", sobj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ }
+ conf->provider->socache_provider->remove(conf->provider->socache_instance,
+ r->server, (unsigned char *) sobj->key, strlen(sobj->key), r->pool);
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_unlock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02369)
+ "could not release lock, ignoring: %s", sobj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ }
+
+ return OK;
+}
+
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r)
+{
+ /* we recalled the headers during open_entity, so do nothing */
+ return APR_SUCCESS;
+}
+
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p,
+ apr_bucket_brigade *bb)
+{
+ cache_socache_object_t *sobj = (cache_socache_object_t*) h->cache_obj->vobj;
+ apr_bucket *e;
+
+ e = APR_BRIGADE_FIRST(sobj->body);
+
+ if (e != APR_BRIGADE_SENTINEL(sobj->body)) {
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r,
+ cache_info *info)
+{
+ cache_socache_dir_conf *dconf =
+ ap_get_module_config(r->per_dir_config, &cache_socache_module);
+ cache_socache_conf *conf = ap_get_module_config(r->server->module_config,
+ &cache_socache_module);
+ apr_size_t slider;
+ apr_status_t rv;
+ cache_object_t *obj = h->cache_obj;
+ cache_socache_object_t *sobj = (cache_socache_object_t*) obj->vobj;
+ cache_socache_info_t *socache_info;
+
+ memcpy(&h->cache_obj->info, info, sizeof(cache_info));
+
+ if (r->headers_out) {
+ sobj->headers_out = ap_cache_cacheable_headers_out(r);
+ }
+
+ if (r->headers_in) {
+ sobj->headers_in = ap_cache_cacheable_headers_in(r);
+ }
+
+ sobj->expire
+ = obj->info.expire > r->request_time + dconf->maxtime ? r->request_time
+ + dconf->maxtime
+ : obj->info.expire + dconf->mintime;
+
+ apr_pool_create(&sobj->pool, r->pool);
+
+ sobj->buffer = apr_palloc(sobj->pool, dconf->max);
+ sobj->buffer_len = dconf->max;
+ socache_info = (cache_socache_info_t *) sobj->buffer;
+
+ if (sobj->headers_out) {
+ const char *vary;
+
+ vary = apr_table_get(sobj->headers_out, "Vary");
+
+ if (vary) {
+ apr_array_header_t* varray;
+ apr_uint32_t format = CACHE_SOCACHE_VARY_FORMAT_VERSION;
+
+ memcpy(sobj->buffer, &format, sizeof(format));
+ slider = sizeof(format);
+
+ memcpy(sobj->buffer + slider, &obj->info.expire,
+ sizeof(obj->info.expire));
+ slider += sizeof(obj->info.expire);
+
+ varray = apr_array_make(r->pool, 6, sizeof(char*));
+ tokens_to_array(r->pool, vary, varray);
+
+ if (APR_SUCCESS != (rv = store_array(varray, sobj->buffer,
+ sobj->buffer_len, &slider))) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02370)
+ "buffer too small for Vary array, caching aborted: %s",
+ obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return rv;
+ }
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_lock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02371)
+ "could not acquire lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return status;
+ }
+ }
+ rv = conf->provider->socache_provider->store(
+ conf->provider->socache_instance, r->server,
+ (unsigned char *) obj->key, strlen(obj->key), sobj->expire,
+ (unsigned char *) sobj->buffer, (unsigned int) slider,
+ sobj->pool);
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_unlock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02372)
+ "could not release lock, ignoring: %s", obj->key);
+ }
+ }
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(02373)
+ "Vary not written to cache, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return rv;
+ }
+
+ obj->key = sobj->key = regen_key(r->pool, sobj->headers_in, varray,
+ sobj->name);
+ }
+ }
+
+ socache_info->format = CACHE_SOCACHE_DISK_FORMAT_VERSION;
+ socache_info->date = obj->info.date;
+ socache_info->expire = obj->info.expire;
+ socache_info->entity_version = sobj->socache_info.entity_version++;
+ socache_info->request_time = obj->info.request_time;
+ socache_info->response_time = obj->info.response_time;
+ socache_info->status = obj->info.status;
+
+ if (r->header_only && r->status != HTTP_NOT_MODIFIED) {
+ socache_info->header_only = 1;
+ }
+ else {
+ socache_info->header_only = sobj->socache_info.header_only;
+ }
+
+ socache_info->name_len = strlen(sobj->name);
+
+ memcpy(&socache_info->control, &obj->info.control, sizeof(cache_control_t));
+ slider = sizeof(cache_socache_info_t);
+
+ if (slider + socache_info->name_len >= sobj->buffer_len) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02374)
+ "cache buffer too small for name: %s",
+ sobj->name);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return APR_EGENERAL;
+ }
+ memcpy(sobj->buffer + slider, sobj->name, socache_info->name_len);
+ slider += socache_info->name_len;
+
+ if (sobj->headers_out) {
+ if (APR_SUCCESS != store_table(sobj->headers_out, sobj->buffer,
+ sobj->buffer_len, &slider)) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02375)
+ "out-headers didn't fit in buffer: %s", sobj->name);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return APR_EGENERAL;
+ }
+ }
+
+ /* Parse the vary header and dump those fields from the headers_in. */
+ /* TODO: Make call to the same thing cache_select calls to crack Vary. */
+ if (sobj->headers_in) {
+ if (APR_SUCCESS != store_table(sobj->headers_in, sobj->buffer,
+ sobj->buffer_len, &slider)) {
+            ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02376)
+                    "in-headers didn't fit in buffer: %s",
+ sobj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return APR_EGENERAL;
+ }
+ }
+
+ sobj->body_offset = slider;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t store_body(cache_handle_t *h, request_rec *r,
+ apr_bucket_brigade *in, apr_bucket_brigade *out)
+{
+ apr_bucket *e;
+ apr_status_t rv = APR_SUCCESS;
+ cache_socache_object_t *sobj =
+ (cache_socache_object_t *) h->cache_obj->vobj;
+ cache_socache_dir_conf *dconf =
+ ap_get_module_config(r->per_dir_config, &cache_socache_module);
+ int seen_eos = 0;
+
+ if (!sobj->offset) {
+ sobj->offset = dconf->readsize;
+ }
+ if (!sobj->timeout && dconf->readtime) {
+ sobj->timeout = apr_time_now() + dconf->readtime;
+ }
+
+ if (!sobj->newbody) {
+ if (sobj->body) {
+ apr_brigade_cleanup(sobj->body);
+ }
+ else {
+ sobj->body = apr_brigade_create(r->pool,
+ r->connection->bucket_alloc);
+ }
+ sobj->newbody = 1;
+ }
+ if (sobj->offset) {
+ apr_brigade_partition(in, sobj->offset, &e);
+ }
+
+ while (APR_SUCCESS == rv && !APR_BRIGADE_EMPTY(in)) {
+ const char *str;
+ apr_size_t length;
+
+ e = APR_BRIGADE_FIRST(in);
+
+ /* are we done completely? if so, pass any trailing buckets right through */
+ if (sobj->done || !sobj->pool) {
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(out, e);
+ continue;
+ }
+
+ /* have we seen eos yet? */
+ if (APR_BUCKET_IS_EOS(e)) {
+ seen_eos = 1;
+ sobj->done = 1;
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(out, e);
+ break;
+ }
+
+ /* honour flush buckets, we'll get called again */
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(out, e);
+ break;
+ }
+
+ /* metadata buckets are preserved as is */
+ if (APR_BUCKET_IS_METADATA(e)) {
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(out, e);
+ continue;
+ }
+
+ /* read the bucket, write to the cache */
+ rv = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(out, e);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02377)
+ "Error when reading bucket for URL %s",
+ h->cache_obj->key);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return rv;
+ }
+
+ /* don't write empty buckets to the cache */
+ if (!length) {
+ continue;
+ }
+
+ sobj->file_size += length;
+ if (sobj->file_size >= sobj->buffer_len - sobj->body_offset) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02378)
+ "URL %s failed the buffer size check "
+ "(%" APR_OFF_T_FMT ">=%" APR_SIZE_T_FMT ")",
+ h->cache_obj->key, sobj->file_size, sobj->buffer_len - sobj->body_offset);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return APR_EGENERAL;
+ }
+
+ rv = apr_bucket_copy(e, &e);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02379)
+ "Error when copying bucket for URL %s",
+ h->cache_obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return rv;
+ }
+ APR_BRIGADE_INSERT_TAIL(sobj->body, e);
+
+ /* have we reached the limit of how much we're prepared to write in one
+ * go? If so, leave, we'll get called again. This prevents us from trying
+         * to swallow too much data at once, or taking so long to write the
+         * data that the client times out.
+ */
+ sobj->offset -= length;
+ if (sobj->offset <= 0) {
+ sobj->offset = 0;
+ break;
+ }
+ if ((dconf->readtime && apr_time_now() > sobj->timeout)) {
+ sobj->timeout = 0;
+ break;
+ }
+
+ }
+
+ /* Was this the final bucket? If yes, perform sanity checks.
+ */
+ if (seen_eos) {
+ const char *cl_header = apr_table_get(r->headers_out, "Content-Length");
+
+ if (r->connection->aborted || r->no_cache) {
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02380)
+ "Discarding body for URL %s "
+ "because connection has been aborted.",
+ h->cache_obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return APR_EGENERAL;
+ }
+ if (cl_header) {
+ apr_int64_t cl = apr_atoi64(cl_header);
+ if ((errno == 0) && (sobj->file_size != cl)) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02381)
+ "URL %s didn't receive complete response, not caching",
+ h->cache_obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return APR_EGENERAL;
+ }
+ }
+
+ /* All checks were fine, we're good to go when the commit comes */
+
+ }
+
+ return APR_SUCCESS;
+}
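+
+/*
+ * Worked example of the incremental limits above (directive values are
+ * hypothetical): with CacheSocacheReadSize 16384 and CacheSocacheReadTime
+ * 100, each call to store_body() caches at most roughly 16KB of body data,
+ * or stops after about 100ms, passing the buckets downstream and leaving
+ * the remainder for later calls; the accumulated body is only written to
+ * the shared object cache when commit_entity() runs after EOS.
+ */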
+
+static apr_status_t commit_entity(cache_handle_t *h, request_rec *r)
+{
+ cache_socache_conf *conf = ap_get_module_config(r->server->module_config,
+ &cache_socache_module);
+ cache_object_t *obj = h->cache_obj;
+ cache_socache_object_t *sobj = (cache_socache_object_t *) obj->vobj;
+ apr_status_t rv;
+ apr_size_t len;
+
+ /* flatten the body into the buffer */
+ len = sobj->buffer_len - sobj->body_offset;
+ rv = apr_brigade_flatten(sobj->body, (char *) sobj->buffer
+ + sobj->body_offset, &len);
+ if (APR_SUCCESS != rv) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02382)
+ "could not flatten brigade, not caching: %s",
+ sobj->key);
+ goto fail;
+ }
+ if (len >= sobj->buffer_len - sobj->body_offset) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02383)
+ "body too big for the cache buffer, not caching: %s",
+ h->cache_obj->key);
+ goto fail;
+ }
+
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_lock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02384)
+ "could not acquire lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return rv;
+ }
+ }
+ rv = conf->provider->socache_provider->store(
+ conf->provider->socache_instance, r->server,
+ (unsigned char *) sobj->key, strlen(sobj->key), sobj->expire,
+ sobj->buffer, (unsigned int) sobj->body_offset + len, sobj->pool);
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_unlock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02385)
+ "could not release lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return DECLINED;
+ }
+ }
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r, APLOGNO(02386)
+ "could not write to cache, ignoring: %s", sobj->key);
+ goto fail;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02387)
+ "commit_entity: Headers and body for URL %s cached for maximum of %d seconds.",
+ sobj->name, (apr_uint32_t)apr_time_sec(sobj->expire - r->request_time));
+
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+
+ return APR_SUCCESS;
+
+fail:
+ /* For safety, remove any existing entry on failure, just in case it could not
+ * be revalidated successfully.
+ */
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_lock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02388)
+ "could not acquire lock, ignoring: %s", obj->key);
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return rv;
+ }
+ }
+ conf->provider->socache_provider->remove(conf->provider->socache_instance,
+ r->server, (unsigned char *) sobj->key, strlen(sobj->key), r->pool);
+ if (socache_mutex) {
+ apr_status_t status = apr_global_mutex_unlock(socache_mutex);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02389)
+ "could not release lock, ignoring: %s", obj->key);
+ }
+ }
+
+ apr_pool_destroy(sobj->pool);
+ sobj->pool = NULL;
+ return rv;
+}
+
+static apr_status_t invalidate_entity(cache_handle_t *h, request_rec *r)
+{
+ /* mark the entity as invalidated */
+ h->cache_obj->info.control.invalidated = 1;
+
+ return commit_entity(h, r);
+}
+
+static void *create_dir_config(apr_pool_t *p, char *dummy)
+{
+ cache_socache_dir_conf *dconf =
+ apr_pcalloc(p, sizeof(cache_socache_dir_conf));
+
+ dconf->max = DEFAULT_MAX_FILE_SIZE;
+ dconf->maxtime = apr_time_from_sec(DEFAULT_MAXTIME);
+ dconf->mintime = apr_time_from_sec(DEFAULT_MINTIME);
+ dconf->readsize = DEFAULT_READSIZE;
+ dconf->readtime = DEFAULT_READTIME;
+
+ return dconf;
+}
+
+static void *merge_dir_config(apr_pool_t *p, void *basev, void *addv)
+{
+ cache_socache_dir_conf
+ *new =
+ (cache_socache_dir_conf *) apr_pcalloc(p, sizeof(cache_socache_dir_conf));
+ cache_socache_dir_conf *add = (cache_socache_dir_conf *) addv;
+ cache_socache_dir_conf *base = (cache_socache_dir_conf *) basev;
+
+ new->max = (add->max_set == 0) ? base->max : add->max;
+ new->max_set = add->max_set || base->max_set;
+ new->maxtime = (add->maxtime_set == 0) ? base->maxtime : add->maxtime;
+ new->maxtime_set = add->maxtime_set || base->maxtime_set;
+ new->mintime = (add->mintime_set == 0) ? base->mintime : add->mintime;
+ new->mintime_set = add->mintime_set || base->mintime_set;
+ new->readsize = (add->readsize_set == 0) ? base->readsize : add->readsize;
+ new->readsize_set = add->readsize_set || base->readsize_set;
+ new->readtime = (add->readtime_set == 0) ? base->readtime : add->readtime;
+ new->readtime_set = add->readtime_set || base->readtime_set;
+
+ return new;
+}
+
+static void *create_config(apr_pool_t *p, server_rec *s)
+{
+ cache_socache_conf *conf = apr_pcalloc(p, sizeof(cache_socache_conf));
+
+ return conf;
+}
+
+static void *merge_config(apr_pool_t *p, void *basev, void *overridesv)
+{
+ cache_socache_conf *ps = apr_pcalloc(p, sizeof(cache_socache_conf));
+ cache_socache_conf *base = (cache_socache_conf *) basev;
+ cache_socache_conf *overrides = (cache_socache_conf *) overridesv;
+
+ ps = overrides ? overrides : base;
+
+ return ps;
+}
+
+/*
+ * mod_cache_socache configuration directives handlers.
+ */
+static const char *set_cache_socache(cmd_parms *cmd, void *in_struct_ptr,
+ const char *arg)
+{
+ cache_socache_conf *conf = ap_get_module_config(cmd->server->module_config,
+ &cache_socache_module);
+ cache_socache_provider_conf *provider = conf->provider
+ = apr_pcalloc(cmd->pool, sizeof(cache_socache_provider_conf));
+
+ const char *err = NULL, *sep, *name;
+
+ /* Argument is of form 'name:args' or just 'name'. */
+ sep = ap_strchr_c(arg, ':');
+ if (sep) {
+ name = apr_pstrmemdup(cmd->pool, arg, sep - arg);
+ sep++;
+ provider->args = sep;
+ }
+ else {
+ name = arg;
+ }
+
+ provider->socache_provider = ap_lookup_provider(AP_SOCACHE_PROVIDER_GROUP,
+ name, AP_SOCACHE_PROVIDER_VERSION);
+ if (provider->socache_provider == NULL) {
+ err = apr_psprintf(cmd->pool,
+ "Unknown socache provider '%s'. Maybe you need "
+ "to load the appropriate socache module "
+ "(mod_socache_%s?)", name, name);
+ }
+ return err;
+}
+
+static const char *set_cache_max(cmd_parms *parms, void *in_struct_ptr,
+ const char *arg)
+{
+ cache_socache_dir_conf *dconf = (cache_socache_dir_conf *) in_struct_ptr;
+
+ if (apr_strtoff(&dconf->max, arg, NULL, 10) != APR_SUCCESS || dconf->max
+ < 1024) {
+ return "CacheSocacheMaxSize argument must be a integer representing the max size of a cached entry (headers and body), at least 1024";
+ }
+ dconf->max_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_maxtime(cmd_parms *parms, void *in_struct_ptr,
+ const char *arg)
+{
+ cache_socache_dir_conf *dconf = (cache_socache_dir_conf *) in_struct_ptr;
+ apr_off_t seconds;
+
+ if (apr_strtoff(&seconds, arg, NULL, 10) != APR_SUCCESS || seconds < 0) {
+ return "CacheSocacheMaxTime argument must be the maximum amount of time in seconds to cache an entry.";
+ }
+ dconf->maxtime = apr_time_from_sec(seconds);
+ dconf->maxtime_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_mintime(cmd_parms *parms, void *in_struct_ptr,
+ const char *arg)
+{
+ cache_socache_dir_conf *dconf = (cache_socache_dir_conf *) in_struct_ptr;
+ apr_off_t seconds;
+
+ if (apr_strtoff(&seconds, arg, NULL, 10) != APR_SUCCESS || seconds < 0) {
+ return "CacheSocacheMinTime argument must be the minimum amount of time in seconds to cache an entry.";
+ }
+ dconf->mintime = apr_time_from_sec(seconds);
+ dconf->mintime_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_readsize(cmd_parms *parms, void *in_struct_ptr,
+ const char *arg)
+{
+ cache_socache_dir_conf *dconf = (cache_socache_dir_conf *) in_struct_ptr;
+
+ if (apr_strtoff(&dconf->readsize, arg, NULL, 10) != APR_SUCCESS
+ || dconf->readsize < 0) {
+ return "CacheSocacheReadSize argument must be a non-negative integer representing the max amount of data to cache in go.";
+ }
+ dconf->readsize_set = 1;
+ return NULL;
+}
+
+static const char *set_cache_readtime(cmd_parms *parms, void *in_struct_ptr,
+ const char *arg)
+{
+ cache_socache_dir_conf *dconf = (cache_socache_dir_conf *) in_struct_ptr;
+ apr_off_t milliseconds;
+
+ if (apr_strtoff(&milliseconds, arg, NULL, 10) != APR_SUCCESS
+ || milliseconds < 0) {
+ return "CacheSocacheReadTime argument must be a non-negative integer representing the max amount of time taken to cache in go.";
+ }
+ dconf->readtime = apr_time_from_msec(milliseconds);
+ dconf->readtime_set = 1;
+ return NULL;
+}
+
+static apr_status_t remove_lock(void *data)
+{
+ if (socache_mutex) {
+ apr_global_mutex_destroy(socache_mutex);
+ socache_mutex = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t destroy_cache(void *data)
+{
+ server_rec *s = data;
+ cache_socache_conf *conf =
+ ap_get_module_config(s->module_config, &cache_socache_module);
+ if (conf->provider && conf->provider->socache_instance) {
+ conf->provider->socache_provider->destroy(
+ conf->provider->socache_instance, s);
+ conf->provider->socache_instance = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+static int socache_precfg(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptmp)
+{
+ apr_status_t rv = ap_mutex_register(pconf, cache_socache_id, NULL,
+ APR_LOCK_DEFAULT, 0);
+ if (rv != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02390)
+ "failed to register %s mutex", cache_socache_id);
+ return 500; /* An HTTP status would be a misnomer! */
+ }
+ return OK;
+}
+
+static int socache_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptmp, server_rec *base_server)
+{
+ server_rec *s;
+ apr_status_t rv;
+ const char *errmsg;
+ static struct ap_socache_hints socache_hints =
+ { 64, 32, 60000000 };
+
+ for (s = base_server; s; s = s->next) {
+ cache_socache_conf *conf =
+ ap_get_module_config(s->module_config, &cache_socache_module);
+
+ if (!conf->provider) {
+ continue;
+ }
+
+ if (!socache_mutex && conf->provider->socache_provider->flags
+ & AP_SOCACHE_FLAG_NOTMPSAFE) {
+
+ rv = ap_global_mutex_create(&socache_mutex, NULL, cache_socache_id,
+ NULL, s, pconf, 0);
+ if (rv != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02391)
+ "failed to create %s mutex", cache_socache_id);
+ return 500; /* An HTTP status would be a misnomer! */
+ }
+ apr_pool_cleanup_register(pconf, NULL, remove_lock,
+ apr_pool_cleanup_null);
+ }
+
+ errmsg = conf->provider->socache_provider->create(
+ &conf->provider->socache_instance, conf->provider->args, ptmp,
+ pconf);
+ if (errmsg) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, 0, plog,
+ APLOGNO(02392) "%s", errmsg);
+ return 500; /* An HTTP status would be a misnomer! */
+ }
+
+ rv = conf->provider->socache_provider->init(
+ conf->provider->socache_instance, cache_socache_id,
+ &socache_hints, s, pconf);
+ if (rv != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02393)
+ "failed to initialise %s cache", cache_socache_id);
+ return 500; /* An HTTP status would be a misnomer! */
+ }
+ apr_pool_cleanup_register(pconf, (void *) s, destroy_cache,
+ apr_pool_cleanup_null);
+
+ }
+
+ return OK;
+}
+
+static void socache_child_init(apr_pool_t *p, server_rec *s)
+{
+ const char *lock;
+ apr_status_t rv;
+ if (!socache_mutex) {
+ return; /* don't waste the overhead of creating mutex & cache */
+ }
+ lock = apr_global_mutex_lockfile(socache_mutex);
+ rv = apr_global_mutex_child_init(&socache_mutex, lock, p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(02394)
+ "failed to initialise mutex in child_init");
+ }
+}
+
+static const command_rec cache_socache_cmds[] =
+{
+ AP_INIT_TAKE1("CacheSocache", set_cache_socache, NULL, RSRC_CONF,
+ "The shared object cache to store cache files"),
+ AP_INIT_TAKE1("CacheSocacheMaxTime", set_cache_maxtime, NULL, RSRC_CONF | ACCESS_CONF,
+ "The maximum cache expiry age to cache a document in seconds"),
+ AP_INIT_TAKE1("CacheSocacheMinTime", set_cache_mintime, NULL, RSRC_CONF | ACCESS_CONF,
+ "The minimum cache expiry age to cache a document in seconds"),
+ AP_INIT_TAKE1("CacheSocacheMaxSize", set_cache_max, NULL, RSRC_CONF | ACCESS_CONF,
+ "The maximum cache entry size (headers and body) to cache a document"),
+ AP_INIT_TAKE1("CacheSocacheReadSize", set_cache_readsize, NULL, RSRC_CONF | ACCESS_CONF,
+ "The maximum quantity of data to attempt to read and cache in one go"),
+ AP_INIT_TAKE1("CacheSocacheReadTime", set_cache_readtime, NULL, RSRC_CONF | ACCESS_CONF,
+ "The maximum time taken to attempt to read and cache in go"),
+ { NULL }
+};
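+
+/*
+ * Illustrative configuration sketch (assumes mod_cache and a shared object
+ * cache provider such as mod_socache_shmcb are loaded; values are examples
+ * only):
+ *
+ *   CacheEnable socache /
+ *   CacheSocache shmcb
+ *   CacheSocacheMaxSize 102400
+ *   CacheSocacheMaxTime 86400
+ *   CacheSocacheMinTime 600
+ *   CacheSocacheReadSize 16384
+ *   CacheSocacheReadTime 100
+ */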
+
+static const cache_provider cache_socache_provider =
+{
+ &remove_entity, &store_headers, &store_body, &recall_headers, &recall_body,
+ &create_entity, &open_entity, &remove_url, &commit_entity,
+ &invalidate_entity
+};
+
+static void cache_socache_register_hook(apr_pool_t *p)
+{
+ /* cache initializer */
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "socache", "0",
+ &cache_socache_provider);
+ ap_hook_pre_config(socache_precfg, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(socache_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init(socache_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+AP_DECLARE_MODULE(cache_socache) = { STANDARD20_MODULE_STUFF,
+ create_dir_config, /* create per-directory config structure */
+ merge_dir_config, /* merge per-directory config structures */
+ create_config, /* create per-server config structure */
+ merge_config, /* merge per-server config structures */
+ cache_socache_cmds, /* command apr_table_t */
+ cache_socache_register_hook /* register hooks */
+};
diff --git a/modules/cache/mod_cache_socache.dsp b/modules/cache/mod_cache_socache.dsp
new file mode 100644
index 00000000..e5d582e2
--- /dev/null
+++ b/modules/cache/mod_cache_socache.dsp
@@ -0,0 +1,115 @@
+# Microsoft Developer Studio Project File - Name="mod_cache_socache" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_cache_socache - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cache_socache.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_cache_socache.mak" CFG="mod_cache_socache - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_cache_socache - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_cache_socache - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_cache_socache - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /Fd"Release\mod_cache_socache_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_cache_socache.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_cache_socache.so" /d LONG_NAME="cache_socache_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Release\mod_cache_socache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cache_socache.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_cache_socache.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_cache_socache - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../srclib/apr-util/include" /I "../../srclib/apr/include" /I "../../include" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /Fd"Debug\mod_cache_socache_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_cache_socache.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_cache_socache.so" /d LONG_NAME="cache_socache_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_cache_socache.so" /base:@..\..\os\win32\BaseAddr.ref,mod_cache_socache.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_cache_socache.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_cache_socache - Win32 Release"
+# Name "mod_cache_socache - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_cache.h
+# End Source File
+# Begin Source File
+
+SOURCE=.\mod_cache_socache.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/cache/mod_socache_memcache.c b/modules/cache/mod_socache_memcache.c
index ccb1bde7..beeeec2c 100644
--- a/modules/cache/mod_socache_memcache.c
+++ b/modules/cache/mod_socache_memcache.c
@@ -182,19 +182,13 @@ static int socache_mc_id2key(ap_socache_instance_t *ctx,
char *key, apr_size_t keylen)
{
char *cp;
- unsigned int n;
if (idlen * 2 + ctx->taglen >= keylen)
return 1;
cp = apr_cpystrn(key, ctx->tag, ctx->taglen);
+ ap_bin2hex(id, idlen, cp);
- for (n = 0; n < idlen; n++) {
- apr_snprintf(cp, 3, "%02X", (unsigned) id[n]);
- cp += 2;
- }
-
- *cp = '\0';
return 0;
}
diff --git a/modules/cluster/mod_heartmonitor.c b/modules/cluster/mod_heartmonitor.c
index 527bc38e..8e4231dc 100644
--- a/modules/cluster/mod_heartmonitor.c
+++ b/modules/cluster/mod_heartmonitor.c
@@ -792,7 +792,7 @@ static void *hm_create_config(apr_pool_t *p, server_rec *s)
hm_ctx_t *ctx = (hm_ctx_t *) apr_palloc(p, sizeof(hm_ctx_t));
ctx->active = 0;
- ctx->storage_path = ap_server_root_relative(p, "logs/hb.dat");
+ ctx->storage_path = ap_runtime_dir_relative(p, DEFAULT_HEARTBEAT_STORAGE);
/* TODO: Add directive for tuning the update interval
*/
ctx->interval = apr_time_from_sec(HM_UPDATE_SEC);
@@ -816,7 +816,7 @@ static const char *cmd_hm_storage(cmd_parms *cmd,
return err;
}
- ctx->storage_path = ap_server_root_relative(p, path);
+ ctx->storage_path = ap_runtime_dir_relative(p, path);
return NULL;
}
diff --git a/modules/core/NWGNUmakefile b/modules/core/NWGNUmakefile
new file mode 100644
index 00000000..a4dd8d32
--- /dev/null
+++ b/modules/core/NWGNUmakefile
@@ -0,0 +1,257 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macro's are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = macro
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Macro Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Macro Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/macro.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_macro.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ aprlib \
+ libc \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @aprlib.imp \
+ @httpd.imp \
+ @libc.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ macro_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/core/config.m4 b/modules/core/config.m4
index e2799c91..8bb39a5c 100644
--- a/modules/core/config.m4
+++ b/modules/core/config.m4
@@ -53,6 +53,8 @@ APACHE_MODULE(watchdog, Watchdog module, , , , [
fi
])
+APACHE_MODULE(macro, Define and use macros in configuration files, , , most)
+
APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
APACHE_MODPATH_FINISH
diff --git a/modules/core/mod_macro.c b/modules/core/mod_macro.c
new file mode 100644
index 00000000..ad730054
--- /dev/null
+++ b/modules/core/mod_macro.c
@@ -0,0 +1,953 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ $Id: mod_macro.c 1455215 2013-03-11 16:28:43Z jim $
+*/
+
+#include "httpd.h"
+#include "http_config.h"
+#include "http_log.h"
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_hash.h"
+
+/************************************************ COMPILE TIME DEBUG CONTROL */
+/*
+ debug:
+ #define MOD_MACRO_DEBUG 1
+
+ gdb:
+ run -f ./test/conf/test??.conf
+*/
+/* #define MOD_MACRO_DEBUG 1 */
+#undef MOD_MACRO_DEBUG
+
+#if defined(debug)
+#undef debug
+#endif /* debug */
+
+#if defined(MOD_MACRO_DEBUG)
+#define debug(stmt) stmt
+#else
+#define debug(stmt)
+#endif /* MOD_MACRO_DEBUG */
+
+/******************************************************** MODULE DECLARATION */
+
+module AP_MODULE_DECLARE_DATA macro_module;
+
+/********************************************************** MACRO MANAGEMENT */
+
+/*
+ this is a macro: name, arguments, contents, location.
+*/
+typedef struct
+{
+ char *name; /* lower case name of the macro */
+ apr_array_header_t *arguments; /* of char*, macro parameter names */
+ apr_array_header_t *contents; /* of char*, macro body */
+ char *location; /* of macro definition, for error messages */
+} ap_macro_t;
+
+/* configuration tokens.
+ */
+#define BEGIN_MACRO "<Macro"
+#define END_MACRO "</Macro>"
+#define USE_MACRO "Use"
+#define UNDEF_MACRO "UndefMacro"
+
+/*
+ Macros are kept globally...
+ They are not per-server or per-directory entities.
+
+ I would need a hook BEFORE and AFTER configuration processing
+ to initialize and close them properly, but no such thing is exported,
+ although it could be available from within apache.
+
+  Such a hook would have to live in server/config.c;
+  the module "initializer" does not seem to be called early enough.
+
+  note: macros are kept in a temp_pool, and initialization is lazy.
+
+ hash type: (char *) name -> (ap_macro_t *) macro
+*/
+static apr_hash_t *ap_macros = NULL;
+
+/*************************************************************** PARSE UTILS */
+
+#define empty_string_p(p) (!(p) || *(p) == '\0')
+#define trim(line) while (*(line) == ' ' || *(line) == '\t') (line)++
+
+/*
+ return configuration-parsed arguments from line as an array.
+ the line is expected not to contain any '\n'?
+*/
+static apr_array_header_t *get_arguments(apr_pool_t * pool, const char *line)
+{
+ apr_array_header_t *args = apr_array_make(pool, 1, sizeof(char *));
+
+ trim(line);
+ while (*line) {
+ char *arg = ap_getword_conf(pool, &line);
+ char **new = apr_array_push(args);
+ *new = arg;
+ trim(line);
+ }
+
+ return args;
+}
+
+/*
+  warn if anything non-blank appears, but ignore comments...
+*/
+static void warn_if_non_blank(
+ const char * what,
+ char * ptr,
+ ap_configfile_t * cfg)
+{
+ char * p;
+ for (p=ptr; *p; p++) {
+ if (*p == '#')
+ break;
+ if (*p != ' ' && *p != '\t') {
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING, 0, NULL,
+ "%s on line %d of %s: %s",
+ what, cfg->line_number, cfg->name, ptr);
+ break;
+ }
+ }
+}
+
+/*
+  reads lines into an array until end_token is found.
+  counts nesting for begin_token/end_token.
+  it assumes a line-per-line configuration (through ap_cfg_getline).
+ this function could be exported.
+ begin_token may be NULL.
+*/
+static char *get_lines_till_end_token(apr_pool_t * pool,
+ ap_configfile_t * config_file,
+ const char *end_token,
+ const char *begin_token,
+ const char *where,
+ apr_array_header_t ** plines)
+{
+ apr_array_header_t *lines = apr_array_make(pool, 1, sizeof(char *));
+ char line[MAX_STRING_LEN]; /* sorry, but this is expected by getline:-( */
+ int macro_nesting = 1, any_nesting = 1;
+ int line_number_start = config_file->line_number;
+
+ while (!ap_cfg_getline(line, MAX_STRING_LEN, config_file)) {
+ char *ptr = line;
+ char *first, **new;
+ /* skip comments */
+ if (*line == '#')
+ continue;
+ first = ap_getword_conf_nc(pool, &ptr);
+ if (first) {
+ /* detect nesting... */
+ if (!strncmp(first, "</", 2)) {
+ any_nesting--;
+ if (any_nesting < 0) {
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING,
+ 0, NULL,
+ "bad (negative) nesting on line %d of %s",
+ config_file->line_number - line_number_start,
+ where);
+ }
+ }
+ else if (!strncmp(first, "<", 1)) {
+ any_nesting++;
+ }
+
+ if (!strcasecmp(first, end_token)) {
+ /* check for proper closing */
+ char * endp = (char *) ap_strrchr_c(line, '>');
+
+ /* this cannot happen if end_token contains '>' */
+ if (endp == NULL) {
+ return "end directive missing closing '>'";
+ }
+
+ warn_if_non_blank(
+ "non blank chars found after directive closing",
+ endp+1, config_file);
+
+ macro_nesting--;
+ if (!macro_nesting) {
+ if (any_nesting) {
+ ap_log_error(APLOG_MARK,
+ APLOG_NOERRNO | APLOG_WARNING, 0, NULL,
+ "bad cumulated nesting (%+d) in %s",
+ any_nesting, where);
+ }
+ *plines = lines;
+ return NULL;
+ }
+ }
+ else if (begin_token && !strcasecmp(first, begin_token)) {
+ macro_nesting++;
+ }
+ }
+ new = apr_array_push(lines);
+ *new = apr_psprintf(pool, "%s" APR_EOL_STR, line); /* put EOL back? */
+ }
+
+ return apr_psprintf(pool, "expected token not found: %s", end_token);
+}
+
+/* the @* arguments are double-quote escaped when substituted */
+#define ESCAPE_ARG '@'
+
+/* other $* and %* arguments are simply replaced without escaping */
+#define ARG_PREFIX "$%@"
+
+/*
+ characters allowed in an argument?
+  not used yet, because enforcing it would raise backward-compatibility issues.
+*/
+#define ARG_CONTENT \
+ "abcdefghijklmnopqrstuvwxyz" \
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
+ "0123456789_" ARG_PREFIX
+
+/*
+ returns whether it looks like an argument, i.e. prefixed by ARG_PREFIX.
+*/
+static int looks_like_an_argument(const char *word)
+{
+ return ap_strchr(ARG_PREFIX, *word) != 0;
+}
+
+/*
+ generates an error on macro with two arguments of the same name.
+ generates an error if a macro argument name is empty.
+ generates a warning if arguments name prefixes conflict.
+ generates a warning if the first char of an argument is not in ARG_PREFIX
+*/
+static const char *check_macro_arguments(apr_pool_t * pool,
+ const ap_macro_t * macro)
+{
+ char **tab = (char **) macro->arguments->elts;
+ int nelts = macro->arguments->nelts;
+ int i;
+
+ for (i = 0; i < nelts; i++) {
+ size_t ltabi = strlen(tab[i]);
+ int j;
+
+ if (ltabi == 0) {
+ return apr_psprintf(pool,
+ "macro '%s' (%s): empty argument #%d name",
+ macro->name, macro->location, i + 1);
+ }
+ else if (!looks_like_an_argument(tab[i])) {
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING, 0, NULL,
+ "macro '%s' (%s) "
+ "argument name '%s' (#%d) without expected prefix, "
+ "better prefix argument names with one of '%s'.",
+ macro->name, macro->location,
+ tab[i], i + 1, ARG_PREFIX);
+ }
+
+ for (j = i + 1; j < nelts; j++) {
+ size_t ltabj = strlen(tab[j]);
+
+ /* must not use the same argument name twice */
+ if (!strcmp(tab[i], tab[j])) {
+ return apr_psprintf(pool,
+ "argument name conflict in macro '%s' (%s): "
+ "argument '%s': #%d and #%d, "
+ "change argument names!",
+ macro->name, macro->location,
+ tab[i], i + 1, j + 1);
+ }
+
+            /* warn about common prefixes, but only for non-empty names */
+ if (ltabi && ltabj &&
+ !strncmp(tab[i], tab[j], ltabi < ltabj ? ltabi : ltabj)) {
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING,
+ 0, NULL,
+ "macro '%s' (%s): "
+ "argument name prefix conflict (%s #%d and %s #%d),"
+ " be careful about your macro definition!",
+ macro->name, macro->location,
+ tab[i], i + 1, tab[j], j + 1);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ warn about empty strings in array. could be legitimate.
+*/
+static void check_macro_use_arguments(const char *where,
+ const apr_array_header_t * array)
+{
+ char **tab = (char **) array->elts;
+ int i;
+ for (i = 0; i < array->nelts; i++) {
+ if (empty_string_p(tab[i])) {
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING, 0, NULL,
+ "%s: empty argument #%d", where, i + 1);
+ }
+ }
+}
+
+/******************************************************** SUBSTITUTION UTILS */
+
+/* could be switched to '\'' */
+#define DELIM '"'
+#define ESCAPE '\\'
+
+/*
+ returns the number of needed escapes for the string
+*/
+static int number_of_escapes(const char delim, const char *str)
+{
+ int nesc = 0;
+ const char *s = str;
+ while (*s) {
+ if (*s == ESCAPE || *s == delim)
+ nesc++;
+ s++;
+ }
+ debug(fprintf(stderr, "escapes: %d ---%s---\n", nesc, str));
+ return nesc;
+}
+
+/*
+  replaces name with replacement at the beginning of buf (of size bufsize).
+ returns an error message or NULL.
+ C is not really a nice language for processing strings.
+*/
+static char *substitute(char *buf,
+ const int bufsize,
+ const char *name,
+ const char *replacement, const int do_esc)
+{
+ int lbuf = strlen(buf),
+ lname = strlen(name),
+ lrepl = strlen(replacement),
+ lsubs = lrepl +
+ (do_esc ? (2 + number_of_escapes(DELIM, replacement)) : 0),
+ shift = lsubs - lname, size = lbuf + shift, i, j;
+
+    /* buf must start with name */
+ ap_assert(!strncmp(buf, name, lname));
+
+    /* nothing to do if the name and its replacement are identical */
+ if (!strcmp(name, replacement))
+ return NULL;
+
+ debug(fprintf(stderr,
+ "substitute(%s,%s,%s,%d,sh=%d,lbuf=%d,lrepl=%d,lsubs=%d)\n",
+ buf, name, replacement, do_esc, shift, lbuf, lrepl, lsubs));
+
+ if (size >= bufsize) {
+ /* could/should I reallocate? */
+ return "cannot substitute, buffer size too small";
+ }
+
+ /* cannot use strcpy as strings may overlap */
+ if (shift != 0) {
+ memmove(buf + lname + shift, buf + lname, lbuf - lname + 1);
+ }
+
+ /* insert the replacement with escapes */
+ j = 0;
+ if (do_esc)
+ buf[j++] = DELIM;
+ for (i = 0; i < lrepl; i++, j++) {
+ if (do_esc && (replacement[i] == DELIM || replacement[i] == ESCAPE))
+ buf[j++] = ESCAPE;
+ buf[j] = replacement[i];
+ }
+ if (do_esc)
+ buf[j++] = DELIM;
+
+ return NULL;
+}
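+
+/*
+  worked example: substituting name "@x" (hence do_esc is set) with the
+  replacement  a "b"  at the start of the buffer
+      @x more
+  shifts the tail and yields
+      "a \"b\"" more
+  i.e. the replacement is wrapped in DELIM quotes and its embedded quotes
+  and backslashes are prefixed with ESCAPE.
+*/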
+
+/*
+  find the first occurrence of one of args in buf.
+  in case of a conflict, the LONGEST argument is kept (could be the FIRST?).
+  returns the pointer and sets whichone to the argument found, or returns NULL.
+*/
+static char *next_substitution(const char *buf,
+ const apr_array_header_t * args, int *whichone)
+{
+ char *chosen = NULL, **tab = (char **) args->elts;
+ size_t lchosen = 0;
+ int i;
+
+ for (i = 0; i < args->nelts; i++) {
+ char *found = ap_strstr((char *) buf, tab[i]);
+ size_t lfound = strlen(tab[i]);
+ if (found && (!chosen || found < chosen ||
+ (found == chosen && lchosen < lfound))) {
+ chosen = found;
+ lchosen = lfound;
+ *whichone = i;
+ }
+ }
+
+ return chosen;
+}
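+
+/*
+  e.g. with arguments $d and $dd, the line "Warning $dd" matches both at the
+  same offset; the longer name $dd wins, so $d does not shadow $dd
+  (see conf/test30.conf and conf/test31.conf).
+*/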
+
+/*
+  substitute macro arguments with their replacements in buf of size bufsize.
+  returns an error message or NULL.
+  if used is provided, the macro arguments actually used are marked in it.
+*/
+static const char *substitute_macro_args(
+ char *buf,
+ int bufsize,
+ const ap_macro_t * macro,
+ const apr_array_header_t * replacements,
+ apr_array_header_t * used)
+{
+ char *ptr = buf,
+ **atab = (char **) macro->arguments->elts,
+ **rtab = (char **) replacements->elts;
+ int whichone = -1;
+
+ if (used) {
+ ap_assert(used->nalloc >= replacements->nelts);
+ }
+ debug(fprintf(stderr, "1# %s", buf));
+
+ while ((ptr = next_substitution(ptr, macro->arguments, &whichone))) {
+ const char *errmsg = substitute(ptr, buf - ptr + bufsize,
+ atab[whichone], rtab[whichone],
+ atab[whichone][0] == ESCAPE_ARG);
+ if (errmsg) {
+ return errmsg;
+ }
+ ptr += strlen(rtab[whichone]);
+ if (used) {
+ used->elts[whichone] = 1;
+ }
+ }
+ debug(fprintf(stderr, "2# %s", buf));
+
+ return NULL;
+}
+
+/*
+ perform substitutions in a macro contents and
+ return the result as a newly allocated array, if result is defined.
+ may also return an error message.
+ passes used down to substitute_macro_args.
+*/
+static const char *process_content(apr_pool_t * pool,
+ const ap_macro_t * macro,
+ const apr_array_header_t * replacements,
+ apr_array_header_t * used,
+ apr_array_header_t ** result)
+{
+ apr_array_header_t *contents = macro->contents;
+ char line[MAX_STRING_LEN];
+ int i;
+
+ if (result) {
+ *result = apr_array_make(pool, 1, sizeof(char *));
+ }
+
+ /* for each line of the macro body */
+ for (i = 0; i < contents->nelts; i++) {
+ const char *errmsg;
+        /* copy the line and substitute macro parameters */
+ strncpy(line, ((char **) contents->elts)[i], MAX_STRING_LEN - 1);
+ errmsg = substitute_macro_args(line, MAX_STRING_LEN,
+ macro, replacements, used);
+ if (errmsg) {
+ return apr_psprintf(pool,
+ "while processing line %d of macro '%s' (%s) %s",
+ i + 1, macro->name, macro->location, errmsg);
+ }
+ /* append substituted line to result array */
+ if (result) {
+ char **new = apr_array_push(*result);
+ *new = apr_pstrdup(pool, line);
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ warn if some macro arguments are not used.
+*/
+static const char *check_macro_contents(apr_pool_t * pool,
+ const ap_macro_t * macro)
+{
+ int nelts = macro->arguments->nelts;
+ char **names = (char **) macro->arguments->elts;
+ apr_array_header_t *used;
+ int i;
+ const char *errmsg;
+
+ if (macro->contents->nelts == 0) {
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING, 0, NULL,
+ "macro '%s' (%s): empty contents!",
+ macro->name, macro->location);
+        return NULL; /* no need for further warnings... */
+ }
+
+ used = apr_array_make(pool, nelts, sizeof(char));
+
+ for (i = 0; i < nelts; i++) {
+ used->elts[i] = 0;
+ }
+
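+    /* dry run: substitute each argument with its own name, so that nothing
+       actually changes (substitute() is a no-op when name == replacement)
+       but the 'used' flags get set for every argument that appears. */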
+ errmsg = process_content(pool, macro, macro->arguments, used, NULL);
+
+ if (errmsg) {
+ return errmsg;
+ }
+
+ for (i = 0; i < nelts; i++) {
+ if (!used->elts[i]) {
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING, 0, NULL,
+ "macro '%s' (%s): argument '%s' (#%d) never used",
+ macro->name, macro->location, names[i], i + 1);
+ }
+ }
+
+ return NULL;
+}
+
+
+/************************************************** MACRO PSEUDO CONFIG FILE */
+
+/*
+  The expanded content of the macro is to be parsed as an ap_configfile_t.
+  This relies on some kind of old-fashioned C object-oriented inheritance
+  of the config data structure.
+
+  The following struct stores the contents.
+
+  This structure holds pointers (next, upper) to the current "file" which was
+  being processed and is interrupted by the macro expansion. At the end
+  of processing the macro, the initial data structure is put back
+  in place (see function next_one) and reading goes on from there.
+
+  If macros are used within macros, there may be a cascade of such temporary
+  arrays used to insert the expanded macro contents before resuming the real
+  file processing.
+
+  There is some hocus-pocus to keep line_number consistent when transitioning
+  from one config to the other.
+*/
+typedef struct
+{
+ int index; /* current element */
+ int char_index; /* current char in element */
+ int length; /* cached length of the current line */
+ apr_array_header_t *contents; /* array of char * */
+ ap_configfile_t *next; /* next config once this one is processed */
+ ap_configfile_t **upper; /* hack: where to update it if needed */
+} array_contents_t;
+
+/*
+ Get next config if any.
+ this may be called several times if there are continuations.
+*/
+static int next_one(array_contents_t * ml)
+{
+ if (ml->next) {
+ ap_assert(ml->upper);
+ *(ml->upper) = ml->next;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+  returns the next char if possible.
+  this may involve switching to the enclosing config.
+*/
+static apr_status_t array_getch(char *ch, void *param)
+{
+ array_contents_t *ml = (array_contents_t *) param;
+ char **tab = (char **) ml->contents->elts;
+
+ while (ml->char_index >= ml->length) {
+ if (ml->index >= ml->contents->nelts) {
+            /* maybe switch to the enclosing config */
+ if (ml->next && ml->next->getch && next_one(ml)) {
+ apr_status_t rc = ml->next->getch(ch, ml->next->param);
+ if (*ch==LF)
+ ml->next->line_number++;
+ return rc;
+ }
+ return APR_EOF;
+ }
+ ml->index++;
+ ml->char_index = 0;
+ ml->length = ml->index >= ml->contents->nelts ?
+ 0 : strlen(tab[ml->index]);
+ }
+
+ *ch = tab[ml->index][ml->char_index++];
+ return APR_SUCCESS;
+}
+
+/*
+  fills buf a la fgets.
+  no more than one line at a time, otherwise the parsing gets too far ahead...
+  returns APR_EOF at end of input.
+*/
+static apr_status_t array_getstr(void *buf, size_t bufsize, void *param)
+{
+ array_contents_t *ml = (array_contents_t *) param;
+ char *buffer = (char *) buf;
+ char next = '\0';
+ size_t i = 0;
+ apr_status_t rc = APR_SUCCESS;
+
+ /* read chars from stream, stop on newline */
+ while (i < bufsize - 1 && next != LF &&
+ ((rc = array_getch(&next, param)) == APR_SUCCESS)) {
+ buffer[i++] = next;
+ }
+
+ if (rc == APR_EOF) {
+ /* maybe update to next, possibly a recursion */
+ if (next_one(ml)) {
+ ap_assert(ml->next->getstr);
+            /* keep the next line count in sync! the caller will update
+               the current line_number, we need to forward it to the next one */
+ ml->next->line_number++;
+ return ml->next->getstr(buf, bufsize, ml->next->param);
+ }
+ /* else that is really all we can do */
+ return APR_EOF;
+ }
+
+ buffer[i] = '\0';
+
+ return APR_SUCCESS;
+}
+
+/*
+ close the array stream?
+*/
+static apr_status_t array_close(void *param)
+{
+ array_contents_t *ml = (array_contents_t *) param;
+ /* move index at end of stream... */
+ ml->index = ml->contents->nelts;
+ ml->char_index = ml->length;
+ return APR_SUCCESS;
+}
+
+/*
+ create an array config stream insertion "object".
+ could be exported.
+*/
+static ap_configfile_t *make_array_config(apr_pool_t * pool,
+ apr_array_header_t * contents,
+ const char *where,
+ ap_configfile_t * cfg,
+ ap_configfile_t ** upper)
+{
+ array_contents_t *ls =
+ (array_contents_t *) apr_palloc(pool, sizeof(array_contents_t));
+ ap_assert(ls!=NULL);
+
+ ls->index = 0;
+ ls->char_index = 0;
+ ls->contents = contents;
+ ls->length = ls->contents->nelts < 1 ?
+ 0 : strlen(((char **) ls->contents->elts)[0]);
+ ls->next = cfg;
+ ls->upper = upper;
+
+ return ap_pcfg_open_custom(pool, where, (void *) ls,
+ array_getch, array_getstr, array_close);
+}
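+
+/*
+  typical use (see use_macro below): the expanded macro lines are wrapped with
+
+      cmd->config_file = make_array_config(cmd->temp_pool, contents, where,
+                                           cmd->config_file, &cmd->config_file);
+
+  so that the config parser reads the expansion first and, once the array is
+  exhausted, next_one() restores the original config file through 'upper'.
+*/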
+
+
+/********************************************************** KEYWORD HANDLING */
+
+/*
+  handles: <Macro macroname arg1 arg2 ...>; any trailing garbage after the '>' is ignored (with a warning)...
+*/
+static const char *macro_section(cmd_parms * cmd,
+ void *dummy, const char *arg)
+{
+ apr_pool_t *pool;
+ char *endp, *name, *where;
+ const char *errmsg;
+ ap_macro_t *macro;
+
+ debug(fprintf(stderr, "macro_section: arg='%s'\n", arg));
+
+ /* lazy initialization */
+ if (ap_macros == NULL)
+ ap_macros = apr_hash_make(cmd->temp_pool);
+ ap_assert(ap_macros != NULL);
+
+ pool = apr_hash_pool_get(ap_macros);
+
+ endp = (char *) ap_strrchr_c(arg, '>');
+
+ if (endp == NULL) {
+ return BEGIN_MACRO "> directive missing closing '>'";
+ }
+
+ if (endp == arg) {
+ return BEGIN_MACRO " macro definition: empty name";
+ }
+
+ warn_if_non_blank("non blank chars found after " BEGIN_MACRO " closing '>'",
+ endp+1, cmd->config_file);
+
+ /* coldly drop '>[^>]*$' out */
+ *endp = '\0';
+
+ /* get lowercase macro name */
+ name = ap_getword_conf(pool, &arg);
+ if (empty_string_p(name)) {
+ return BEGIN_MACRO " macro definition: name not found";
+ }
+
+ ap_str_tolower(name);
+ macro = apr_hash_get(ap_macros, name, APR_HASH_KEY_STRING);
+
+ if (macro != NULL) {
+ /* already defined: warn about the redefinition */
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING, 0, NULL,
+ "macro '%s' multiply defined: "
+ "%s, redefined on line %d of \"%s\"",
+ macro->name, macro->location,
+ cmd->config_file->line_number, cmd->config_file->name);
+ }
+ else {
+ /* allocate a new macro */
+ macro = (ap_macro_t *) apr_palloc(pool, sizeof(ap_macro_t));
+ macro->name = name;
+ }
+
+ debug(fprintf(stderr, "macro_section: name=%s\n", name));
+
+    /* record the macro definition location */
+ macro->location = apr_psprintf(pool,
+ "defined on line %d of \"%s\"",
+ cmd->config_file->line_number,
+ cmd->config_file->name);
+ debug(fprintf(stderr, "macro_section: location=%s\n", macro->location));
+
+ where =
+ apr_psprintf(pool, "macro '%s' (%s)", macro->name, macro->location);
+
+ if (looks_like_an_argument(name)) {
+ ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_WARNING, 0, NULL,
+ "%s better prefix a macro name with any of '%s'",
+ where, ARG_PREFIX);
+ }
+
+ /* get macro parameters */
+ macro->arguments = get_arguments(pool, arg);
+
+ errmsg = check_macro_arguments(cmd->temp_pool, macro);
+
+ if (errmsg) {
+ return errmsg;
+ }
+
+ errmsg = get_lines_till_end_token(pool, cmd->config_file,
+ END_MACRO, BEGIN_MACRO,
+ where, &macro->contents);
+
+ if (errmsg) {
+ return apr_psprintf(cmd->temp_pool,
+ "%s" APR_EOL_STR "\tcontents error: %s",
+ where, errmsg);
+ }
+
+ errmsg = check_macro_contents(cmd->temp_pool, macro);
+
+ if (errmsg) {
+ return apr_psprintf(cmd->temp_pool,
+ "%s" APR_EOL_STR "\tcontents checking error: %s",
+ where, errmsg);
+ }
+
+ /* store the new macro */
+ apr_hash_set(ap_macros, name, APR_HASH_KEY_STRING, macro);
+
+ return NULL;
+}
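+
+/*
+  for illustration, a definition such as
+
+      <Macro LocalAccessOnly $ip>
+          Warning "restricted to $ip"
+      </Macro>
+
+  (the directives above are only an example) is stored under the lowercased
+  name "localaccessonly"; its body is kept verbatim and only substituted
+  when a later Use line refers to it.
+*/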
+
+/*
+ handles: Use name value1 value2 ...
+*/
+static const char *use_macro(cmd_parms * cmd, void *dummy, const char *arg)
+{
+ char *name, *recursion, *where;
+ const char *errmsg;
+ ap_macro_t *macro;
+ apr_array_header_t *replacements;
+ apr_array_header_t *contents;
+
+ debug(fprintf(stderr, "use_macro -%s-\n", arg));
+
+    /* must be initialized, or no macro has been defined */
+ if (ap_macros == NULL) {
+ return "no macro defined before " USE_MACRO;
+ }
+
+ /* get lowercase macro name */
+ name = ap_getword_conf(cmd->temp_pool, &arg);
+ ap_str_tolower(name);
+
+ if (empty_string_p(name)) {
+ return "no macro name specified with " USE_MACRO;
+ }
+
+ /* get macro definition */
+ macro = apr_hash_get(ap_macros, name, APR_HASH_KEY_STRING);
+
+ if (!macro) {
+ return apr_psprintf(cmd->temp_pool, "macro '%s' undefined", name);
+ }
+
+ /* recursion is detected here by looking at the config file name,
+     * which may already contain "macro 'foo'". Ok, it looks like a hack,
+     * but otherwise it is not easy to keep this data available somewhere...
+     * the name has just the needed visibility and lifetime.
+ */
+ recursion =
+ apr_pstrcat(cmd->temp_pool, "macro '", macro->name, "'", NULL);
+
+ if (ap_strstr((char *) cmd->config_file->name, recursion)) {
+ return apr_psprintf(cmd->temp_pool,
+ "recursive use of macro '%s' is invalid",
+ macro->name);
+ }
+
+ /* get macro arguments */
+ replacements = get_arguments(cmd->temp_pool, arg);
+
+ if (macro->arguments->nelts != replacements->nelts) {
+ return apr_psprintf(cmd->temp_pool,
+ "macro '%s' (%s) used "
+ "with %d arguments instead of %d",
+ macro->name, macro->location,
+ replacements->nelts, macro->arguments->nelts);
+ }
+
+ where = apr_psprintf(cmd->temp_pool,
+ "macro '%s' (%s) used on line %d of \"%s\"",
+ macro->name, macro->location,
+ cmd->config_file->line_number,
+ cmd->config_file->name);
+
+ check_macro_use_arguments(where, replacements);
+
+ errmsg = process_content(cmd->temp_pool, macro, replacements,
+ NULL, &contents);
+
+ if (errmsg) {
+ return apr_psprintf(cmd->temp_pool,
+ "%s error while substituting: %s",
+ where, errmsg);
+ }
+
+ /* the current "config file" is replaced by a string array...
+ at the end of processing the array, the initial config file
+       will be put back in place (see next_one) so processing can go on. */
+ cmd->config_file = make_array_config(cmd->temp_pool, contents, where,
+ cmd->config_file, &cmd->config_file);
+
+ return NULL;
+}
+
+static const char *undef_macro(cmd_parms * cmd, void *dummy, const char *arg)
+{
+ char *name;
+ ap_macro_t *macro;
+
+    /* must be initialized, or no macro has been defined */
+ if (ap_macros == NULL) {
+ return "no macro defined before " UNDEF_MACRO;
+ }
+
+ if (empty_string_p(arg)) {
+ return "no macro name specified with " UNDEF_MACRO;
+ }
+
+ /* check that the macro is defined */
+ name = apr_pstrdup(cmd->temp_pool, arg);
+ ap_str_tolower(name);
+ macro = apr_hash_get(ap_macros, name, APR_HASH_KEY_STRING);
+ if (macro == NULL) {
+ /* could be a warning? */
+ return apr_psprintf(cmd->temp_pool,
+ "cannot remove undefined macro '%s'", name);
+ }
+
+    /* the macro is allocated in a pool, so it cannot be freed here */
+ /* remove macro from hash table */
+ apr_hash_set(ap_macros, name, APR_HASH_KEY_STRING, NULL);
+
+ return NULL;
+}
+
+/************************************************************* EXPORT MODULE */
+
+/*
+  macro module commands: configuration file macro handling.
+  they are processed immediately when found, hence the EXEC_ON_READ.
+*/
+static const command_rec macro_cmds[] = {
+ AP_INIT_RAW_ARGS(BEGIN_MACRO, macro_section, NULL, EXEC_ON_READ | OR_ALL,
+ "Beginning of a macro definition section."),
+ AP_INIT_RAW_ARGS(USE_MACRO, use_macro, NULL, EXEC_ON_READ | OR_ALL,
+ "Use of a macro."),
+ AP_INIT_TAKE1(UNDEF_MACRO, undef_macro, NULL, EXEC_ON_READ | OR_ALL,
+ "Remove a macro definition."),
+
+ {NULL}
+};
+
+/*
+  Module hooks are request-oriented, so they do not suit configuration
+  file utilities very well. I haven't found any clean hook to apply something
+  before and then after configuration file processing. Also, what about
+  .htaccess files?
+
+ Thus I think that server/util.c or server/config.c
+ would be a better place for this stuff.
+*/
+
+AP_DECLARE_MODULE(macro) = {
+ STANDARD20_MODULE_STUFF, /* common stuff */
+ NULL, /* create per-directory config */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ macro_cmds, /* configuration commands */
+ NULL /* register hooks */
+};
diff --git a/modules/core/mod_macro.dsp b/modules/core/mod_macro.dsp
new file mode 100644
index 00000000..61914cc7
--- /dev/null
+++ b/modules/core/mod_macro.dsp
@@ -0,0 +1,111 @@
+# Microsoft Developer Studio Project File - Name="mod_macro" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_macro - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_macro.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_macro.mak" CFG="mod_macro - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_macro - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_macro - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_macro - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_macro_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG" /i "../../include" /i "../../srclib/apr/include" /d BIN_NAME="mod_macro.so" /d LONG_NAME="macro_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /out:".\Release\mod_macro.so" /base:@..\..\os\win32\BaseAddr.ref,mod_macro.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Release\mod_macro.so" /base:@..\..\os\win32\BaseAddr.ref,mod_macro.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_macro.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_macro - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_macro_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /i "../../include" /i "../../srclib/apr/include" /d BIN_NAME="mod_macro.so" /d LONG_NAME="macro_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_macro.so" /base:@..\..\os\win32\BaseAddr.ref,mod_macro.so
+# ADD LINK32 kernel32.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_macro.so" /base:@..\..\os\win32\BaseAddr.ref,mod_macro.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_macro.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_macro - Win32 Release"
+# Name "mod_macro - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\mod_macro.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/core/test/Makefile b/modules/core/test/Makefile
new file mode 100755
index 00000000..8ebb1a9e
--- /dev/null
+++ b/modules/core/test/Makefile
@@ -0,0 +1,69 @@
+#
+# $Id: Makefile 1435811 2013-01-20 10:07:44Z fabien $
+#
+# mod_macro non-regression tests
+
+# where is apache
+APA.dir = /tmp/apache
+
+# apache executable with mod_macro loaded
+HTTPD = \
+ $(APA.dir)/bin/httpd \
+ -C 'LoadModule macro_module modules/mod_macro.so' \
+ -C "Define PWD $$PWD/conf"
+
+# default target
+.PHONY: default
+default: clean
+
+# run all non regression tests
+.PHONY: check
+check: check-out
+
+# result directory
+OUT = out
+out:
+ mkdir $@
+
+# test cases & results
+F.conf = $(wildcard conf/test*.conf)
+F.out = $(F.conf:conf/%.conf=$(OUT)/%.out)
+
+# run all tests
+.PHONY: run-test
+run-test: $(F.out)
+
+# generate & compare in a separate directory
+.PHONY: check-out
+check-out: out
+ $(RM) out/*.out
+ $(MAKE) OUT=out run-test
+ diff -r out/ ref/
+
+# generate & compare in the same directory
+.PHONY: check-ref
+check-ref:
+ $(RM) ref/*.out
+ $(MAKE) OUT=ref run-test
+ svn diff ref/
+
+# run one test case
+# filter output so that it is portable
+# use '|' as sed separator because $PWD will contain plenty of '/'
+$(OUT)/%.out: conf/%.conf
+ { \
+ echo "# testing with $<" ; \
+ $(HTTPD) -f $$PWD/$< 2>&1 ; \
+ echo "# exit: $$?" ; \
+ } > $@.tmp ; \
+ sed -e "s|$$PWD|.|g" \
+ -e "s|^\[[\.a-zA-Z0-9 :]*\] ||" \
+ -e "s|\[pid [0-9]*:tid [0-9]*] ||" \
+ $@.tmp > $@ ; \
+ $(RM) $@.tmp
+
+# cleanup
+.PHONY: clean
+clean:
+ $(RM) *~
+ $(RM) -r out
diff --git a/modules/core/test/conf/inc63_1.conf b/modules/core/test/conf/inc63_1.conf
new file mode 100644
index 00000000..6a436f9c
--- /dev/null
+++ b/modules/core/test/conf/inc63_1.conf
@@ -0,0 +1,5 @@
+# macro for include
+<Macro Foo $where>
+ Warning "Foo macro at $where"
+</Macro>
+Use Foo "inc63_.conf:5"
diff --git a/modules/core/test/conf/inc63_2.conf b/modules/core/test/conf/inc63_2.conf
new file mode 100644
index 00000000..3a0da9ea
--- /dev/null
+++ b/modules/core/test/conf/inc63_2.conf
@@ -0,0 +1,3 @@
+# use macro defined elsewhere
+Use Foo "inc63_2.conf:2"
+Use Bla "inc63_2.conf:3"
diff --git a/modules/core/test/conf/test01.conf b/modules/core/test/conf/test01.conf
new file mode 100755
index 00000000..9a3d9abb
--- /dev/null
+++ b/modules/core/test/conf/test01.conf
@@ -0,0 +1,3 @@
+# no macro name
+<Macro>
+</Macro>
diff --git a/modules/core/test/conf/test02.conf b/modules/core/test/conf/test02.conf
new file mode 100755
index 00000000..1fe4b412
--- /dev/null
+++ b/modules/core/test/conf/test02.conf
@@ -0,0 +1,3 @@
+# no macro name and spaces
+<Macro >
+</Macro>
diff --git a/modules/core/test/conf/test03.conf b/modules/core/test/conf/test03.conf
new file mode 100755
index 00000000..fdcf4c8c
--- /dev/null
+++ b/modules/core/test/conf/test03.conf
@@ -0,0 +1,5 @@
+# use undefined macro
+<Macro foo>
+ Warning "macro foo"
+</Macro>
+Use bla
diff --git a/modules/core/test/conf/test04.conf b/modules/core/test/conf/test04.conf
new file mode 100755
index 00000000..82287754
--- /dev/null
+++ b/modules/core/test/conf/test04.conf
@@ -0,0 +1,5 @@
+# wrong args
+<Macro foo>
+ Warning "macro foo"
+</Macro>
+Use foo hello
diff --git a/modules/core/test/conf/test05.conf b/modules/core/test/conf/test05.conf
new file mode 100755
index 00000000..2166acae
--- /dev/null
+++ b/modules/core/test/conf/test05.conf
@@ -0,0 +1,5 @@
+# wrong args
+<Macro foo $premier>
+ Warning "macro foo $premier"
+</Macro>
+Use foo
diff --git a/modules/core/test/conf/test06.conf b/modules/core/test/conf/test06.conf
new file mode 100755
index 00000000..35e6b684
--- /dev/null
+++ b/modules/core/test/conf/test06.conf
@@ -0,0 +1,6 @@
+# wrong args
+<Macro foo $premier>
+ Warning "macro foo $premier"
+</Macro>
+Use foo one two
+
diff --git a/modules/core/test/conf/test07.conf b/modules/core/test/conf/test07.conf
new file mode 100755
index 00000000..06f050e4
--- /dev/null
+++ b/modules/core/test/conf/test07.conf
@@ -0,0 +1,3 @@
+# missing end macro
+<Macro foo $premier>
+hello
diff --git a/modules/core/test/conf/test08.conf b/modules/core/test/conf/test08.conf
new file mode 100755
index 00000000..333dbd9b
--- /dev/null
+++ b/modules/core/test/conf/test08.conf
@@ -0,0 +1,3 @@
+# missing begin macro
+ServerName hello
+</Macro>
diff --git a/modules/core/test/conf/test09.conf b/modules/core/test/conf/test09.conf
new file mode 100755
index 00000000..2513b6eb
--- /dev/null
+++ b/modules/core/test/conf/test09.conf
@@ -0,0 +1,6 @@
+# recursion is bad
+<Macro foo>
+Use foo
+</Macro>
+
+Use foo
diff --git a/modules/core/test/conf/test10.conf b/modules/core/test/conf/test10.conf
new file mode 100755
index 00000000..157129d0
--- /dev/null
+++ b/modules/core/test/conf/test10.conf
@@ -0,0 +1,10 @@
+# indirect recursion is bad
+<Macro foo>
+Use bla
+</Macro>
+
+<Macro bla>
+Use foo
+</Macro>
+
+Use foo
diff --git a/modules/core/test/conf/test11.conf b/modules/core/test/conf/test11.conf
new file mode 100755
index 00000000..f397ec63
--- /dev/null
+++ b/modules/core/test/conf/test11.conf
@@ -0,0 +1,15 @@
+# inner macros...
+<Macro foo $arg>
+<Macro $arg.in>
+Warning "macro $arg.in line 1"
+</Macro>
+</Macro>
+
+# generate a one.in macro
+Use foo one
+
+# use it!
+Use one.in
+
+# end processing
+Error "done line 15."
diff --git a/modules/core/test/conf/test12.conf b/modules/core/test/conf/test12.conf
new file mode 100755
index 00000000..84403c6c
--- /dev/null
+++ b/modules/core/test/conf/test12.conf
@@ -0,0 +1,12 @@
+# multiply defined generates a warning
+<Macro foo>
+ Warning "macro foo 1, line 1"
+</Macro>
+
+<Macro foo>
+ Warning "macro foo 2, line 1"
+</Macro>
+
+Use foo
+
+Error "done line 12."
diff --git a/modules/core/test/conf/test13.conf b/modules/core/test/conf/test13.conf
new file mode 100755
index 00000000..244470df
--- /dev/null
+++ b/modules/core/test/conf/test13.conf
@@ -0,0 +1,18 @@
+# case insensitive
+<Macro FOO>
+ Warning "macro FOO line 1"
+</MACRO>
+
+<MACRO bla>
+ Warning "macro bla line 1"
+</macro>
+
+use foo
+
+<macro foo>
+ Warning "redefined macro foo line 1"
+</macro>
+
+use FOO
+
+Error "done line 18."
diff --git a/modules/core/test/conf/test14.conf b/modules/core/test/conf/test14.conf
new file mode 100755
index 00000000..48d88889
--- /dev/null
+++ b/modules/core/test/conf/test14.conf
@@ -0,0 +1,23 @@
+# VirtualHost example
+
+<Macro MyVirtualHost $host $port $dir>
+ Listen $port
+ <VirtualHost $host:$port>
+ DocumentRoot $dir
+ <Directory $dir>
+ Warning "directory $dir"
+ </Directory>
+ # limit access to intranet subdir.
+ <Directory $dir/intranet>
+ Warning "directory $dir/intranet"
+ </Directory>
+ </VirtualHost>
+</Macro>
+
+Use MyVirtualHost www.apache.org 80 /projects/apache/web
+
+Use MyVirtualHost www.perl.com 8080 /projects/perl/web
+
+Use MyVirtualHost www.ensmp.fr 1234 /projects/mines/web
+
+Error "done line 23."
diff --git a/modules/core/test/conf/test15.conf b/modules/core/test/conf/test15.conf
new file mode 100755
index 00000000..7990e154
--- /dev/null
+++ b/modules/core/test/conf/test15.conf
@@ -0,0 +1,9 @@
+# not properly nested...
+<macro test>
+<directory /tmp>
+</macro>
+
+use test
+</directory>
+
+Error should not reach this point.
diff --git a/modules/core/test/conf/test16.conf b/modules/core/test/conf/test16.conf
new file mode 100755
index 00000000..471f66e6
--- /dev/null
+++ b/modules/core/test/conf/test16.conf
@@ -0,0 +1,11 @@
+# bad nesting
+
+<Macro foo>
+</Limit>
+</Macro>
+
+<Limit GET>
+Use foo
+</Limit>
+
+stop
diff --git a/modules/core/test/conf/test17.conf b/modules/core/test/conf/test17.conf
new file mode 100755
index 00000000..f6294bbb
--- /dev/null
+++ b/modules/core/test/conf/test17.conf
@@ -0,0 +1,10 @@
+# bad but good nesting
+
+<Macro foo>
+</Directory>
+</Macro>
+
+<Directory /tmp>
+Use foo
+
+Error "done on line 10."
diff --git a/modules/core/test/conf/test18.conf b/modules/core/test/conf/test18.conf
new file mode 100755
index 00000000..118617de
--- /dev/null
+++ b/modules/core/test/conf/test18.conf
@@ -0,0 +1,10 @@
+# bad but good nesting
+
+<Macro foo>
+</Location>
+</Macro>
+
+<Location /intranet>
+Use foo
+
+Error "done on line 10."
diff --git a/modules/core/test/conf/test19.conf b/modules/core/test/conf/test19.conf
new file mode 100755
index 00000000..6568e968
--- /dev/null
+++ b/modules/core/test/conf/test19.conf
@@ -0,0 +1,26 @@
+# okay till done
+
+<Macro foo $where>
+ # something
+ Warning "macro foo line 2 in $where"
+</Macro>
+
+<Directory /tmp>
+ Use foo Directory
+</Directory>
+
+<Location /intra>
+ Use foo Location
+</Location>
+
+<VirtualHost www.apache.org>
+ Use foo VirtualHost
+</VirtualHost>
+
+<VirtualHost www.perl.com>
+ <Directory /tmp>
+ Use foo "VirtualHost & Directory"
+ </Directory>
+</VirtualHost>
+
+Error "done line 26."
diff --git a/modules/core/test/conf/test20.conf b/modules/core/test/conf/test20.conf
new file mode 100755
index 00000000..ccbae0d3
--- /dev/null
+++ b/modules/core/test/conf/test20.conf
@@ -0,0 +1,11 @@
+# directory in directory through a macro
+
+<Macro foo $dir>
+ <Directory $dir>
+ Warning "macro foo $dir"
+ </Directory>
+</Macro>
+
+<Directory /tmp>
+ Use foo /tmp
+</Directory>
diff --git a/modules/core/test/conf/test21.conf b/modules/core/test/conf/test21.conf
new file mode 100755
index 00000000..7a8c4c9a
--- /dev/null
+++ b/modules/core/test/conf/test21.conf
@@ -0,0 +1,11 @@
+# raise an error
+
+<Macro foo>
+ <Directory /tmp>
+ Error "macro foo dir /tmp"
+ </Directory>
+</Macro>
+
+<VirtualHost *>
+ Use foo
+</VirtualHost>
diff --git a/modules/core/test/conf/test22.conf b/modules/core/test/conf/test22.conf
new file mode 100755
index 00000000..5a89f83a
--- /dev/null
+++ b/modules/core/test/conf/test22.conf
@@ -0,0 +1,11 @@
+# simple nesting
+
+<Macro foo>
+ <Directory /tmp>
+ Warning "macro foo"
+ </Directory>
+</Macro>
+
+Use foo
+
+Error "done on line 11."
diff --git a/modules/core/test/conf/test23.conf b/modules/core/test/conf/test23.conf
new file mode 100755
index 00000000..e21e2ee6
--- /dev/null
+++ b/modules/core/test/conf/test23.conf
@@ -0,0 +1,15 @@
+# macro defined in a directory
+
+<Directory /tmp>
+ <Macro foo>
+ Warning "macro foo in /tmp"
+ </Macro>
+</Directory>
+
+Use foo
+
+<Directory /tmp>
+ Use foo
+</Directory>
+
+Error "done!"
diff --git a/modules/core/test/conf/test24.conf b/modules/core/test/conf/test24.conf
new file mode 100755
index 00000000..d35070e6
--- /dev/null
+++ b/modules/core/test/conf/test24.conf
@@ -0,0 +1,23 @@
+# nesting...
+
+<Macro bla>
+ <Location /intra>
+ Warning "macro bla intra"
+ </Location>
+ <Location /private>
+ Warning "macro bla private"
+ </Location>
+</Macro>
+
+# ok location in config
+Use bla
+
+# ok, location in VH
+<VirtualHost foo.com>
+ Use bla
+</VirtualHost>
+
+<Directory /tmp>
+  # fails: Location within a Directory
+ Use bla
+</Directory>
diff --git a/modules/core/test/conf/test25.conf b/modules/core/test/conf/test25.conf
new file mode 100755
index 00000000..724cf942
--- /dev/null
+++ b/modules/core/test/conf/test25.conf
@@ -0,0 +1,27 @@
+# ok till stop.
+
+<Macro RestrictedAccessPolicy $ips>
+ Warning "restricted access policy $ips"
+</Macro>
+
+<Directory /unexpected/1>
+ Use RestrictedAccessPolicy 10.0.0.0/8
+</Directory>
+
+<Macro LocalAccessOnly>
+ Use RestrictedAccessPolicy 10.0.0.0/8
+</Macro>
+
+<Directory /unexpected/2>
+ Use RestrictedAccessPolicy "192.54.172.0/24 192.54.148.0/24 10.0.0.0/8"
+</Directory>
+
+<Location /intra>
+ Use LocalAccessOnly
+</Location>
+
+<Location /admin>
+ Use LocalAccessOnly
+</Location>
+
+Error "done line 27."
diff --git a/modules/core/test/conf/test26.conf b/modules/core/test/conf/test26.conf
new file mode 100755
index 00000000..bb4b5ad0
--- /dev/null
+++ b/modules/core/test/conf/test26.conf
@@ -0,0 +1,19 @@
+# ok till stop.
+# test quotes...
+
+<Macro funny "first arg" 'second ... arg'>
+ <Directory first arg>
+ Warning "funny directory"
+ </Directory>
+ <Location second ... arg>
+ Warning "funny location"
+ </Location>
+</Macro>
+
+Use funny /unexpected/1 /intra
+
+<VirtualHost www.apache.org>
+ Use funny /unexpected/2 /intranet
+</VirtualHost>
+
+Error "done!"
diff --git a/modules/core/test/conf/test27.conf b/modules/core/test/conf/test27.conf
new file mode 100755
index 00000000..2f3e83c7
--- /dev/null
+++ b/modules/core/test/conf/test27.conf
@@ -0,0 +1,22 @@
+# define a macro in a macro.
+
+<Macro foo $dir $name>
+ <Macro foo.$name>
+ <Directory $dir>
+ Warning "foo.$name $dir"
+ </Directory>
+ </Macro>
+</Macro>
+
+Use foo /unexpected/1 one
+Use foo /unexpected/2 two
+
+Use foo.one
+Use foo.two
+Use foo.one
+
+UndefMacro foo.one
+UndefMacro foo.two
+UndefMacro foo
+
+Error "done!"
diff --git a/modules/core/test/conf/test28.conf b/modules/core/test/conf/test28.conf
new file mode 100755
index 00000000..69c1c9b6
--- /dev/null
+++ b/modules/core/test/conf/test28.conf
@@ -0,0 +1,13 @@
+# interaction with IfModule
+
+<IfModule mod_macro.c>
+ <Macro foo>
+ Warning "macro foo"
+ </Macro>
+
+ Use foo
+
+ Error "done!"
+</IfModule>
+
+Error "should not get there"
diff --git a/modules/core/test/conf/test29.conf b/modules/core/test/conf/test29.conf
new file mode 100755
index 00000000..7d1f380f
--- /dev/null
+++ b/modules/core/test/conf/test29.conf
@@ -0,0 +1,10 @@
+# trigger line overflow during expansion
+
+<Macro toobigaline a>
+ Warning aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+</Macro>
+
+Use toobigaline "cette ligne va etre vraiment trop longue ya pas de doute"
+
+Error "should not get there!"
diff --git a/modules/core/test/conf/test30.conf b/modules/core/test/conf/test30.conf
new file mode 100755
index 00000000..72b2bb2c
--- /dev/null
+++ b/modules/core/test/conf/test30.conf
@@ -0,0 +1,12 @@
+# name conflicts: the longest is chosen
+# also test a parametric section
+
+<Macro foo $dir $directive>
+ <$directive $dir>
+ Warning "section $directive $dir"
+ </$directive>
+</Macro>
+
+Use foo /unexpected/1 Directory
+
+Error "done!"
diff --git a/modules/core/test/conf/test31.conf b/modules/core/test/conf/test31.conf
new file mode 100755
index 00000000..14964ba3
--- /dev/null
+++ b/modules/core/test/conf/test31.conf
@@ -0,0 +1,16 @@
+# parameter name conflicts
+
+<Macro bla $dir $di $dd $d>
+ Warning "argument name conflicts"
+ $d $di $dir $dd
+</Macro>
+
+Use bla '' '' 8080 Listen
+
+<Macro foo $d $di $dir $dd>
+ Warning "conflicts, but arguments are not used"
+</Macro>
+
+Use foo '' '' 8080 Listen
+
+Error "done on line 16."
diff --git a/modules/core/test/conf/test32.conf b/modules/core/test/conf/test32.conf
new file mode 100755
index 00000000..0b116eca
--- /dev/null
+++ b/modules/core/test/conf/test32.conf
@@ -0,0 +1,7 @@
+# error if same argument name.
+
+<Macro foo $arg1 $arg2 $arg3 $arg2>
+# bad
+</Macro>
+
+
diff --git a/modules/core/test/conf/test33.conf b/modules/core/test/conf/test33.conf
new file mode 100755
index 00000000..2a8a6dba
--- /dev/null
+++ b/modules/core/test/conf/test33.conf
@@ -0,0 +1,3 @@
+# empty name.
+
+Use ''
diff --git a/modules/core/test/conf/test34.conf b/modules/core/test/conf/test34.conf
new file mode 100755
index 00000000..1f4671f4
--- /dev/null
+++ b/modules/core/test/conf/test34.conf
@@ -0,0 +1,14 @@
+# macro parameter prefix conflicts in two orders
+
+<Macro foo $d $dd>
+ Warning "macro foo conflict one"
+</Macro>
+
+<Macro bla $dd $d>
+ Warning "macro bla conflict two"
+</Macro>
+
+Use foo 1 2
+Use bla 1 2
+
+Error "done on line 14."
diff --git a/modules/core/test/conf/test35.conf b/modules/core/test/conf/test35.conf
new file mode 100755
index 00000000..37a623ed
--- /dev/null
+++ b/modules/core/test/conf/test35.conf
@@ -0,0 +1,10 @@
+# unused arguments
+
+<Macro warnings u1 u2 n1 n2 u3>
+ Warning "macro cannot be used just within a comment u1 u2 u3"
+ # n1 n2
+</Macro>
+
+Use warnings 1 2 3 4 5
+
+Error "done on line 10."
diff --git a/modules/core/test/conf/test36.conf b/modules/core/test/conf/test36.conf
new file mode 100755
index 00000000..a68667e1
--- /dev/null
+++ b/modules/core/test/conf/test36.conf
@@ -0,0 +1,12 @@
+<Macro warnings $u $n $u1 $n1 $u2 $n2>
+ Warning "many warnings! $u $u1 $u2"
+ # $n $n1 $n2
+</Macro>
+
+# warn about unused arguments
+Use warnings 1 2 3 4 5 6
+
+# may warn about empty arguments?
+Use warnings '' '' '' '' '' ''
+
+Error "done!"
diff --git a/modules/core/test/conf/test37.conf b/modules/core/test/conf/test37.conf
new file mode 100755
index 00000000..296dde75
--- /dev/null
+++ b/modules/core/test/conf/test37.conf
@@ -0,0 +1,7 @@
+# empty argument name
+
+<Macro stupid ''>
+ Warn "macro stupid"
+</Macro>
+
+Use stupid hello
diff --git a/modules/core/test/conf/test38.conf b/modules/core/test/conf/test38.conf
new file mode 100755
index 00000000..184763f0
--- /dev/null
+++ b/modules/core/test/conf/test38.conf
@@ -0,0 +1,10 @@
+# ifmodule
+
+<IfModule mod_macro.c>
+Warning it is really a good idea to have mod_macro.c installed.
+</IfModule>
+
+<IfModule !mod_perl.c>
+Error it seems you do not have mod perl installed.
+</IfModule>
+
diff --git a/modules/core/test/conf/test39.conf b/modules/core/test/conf/test39.conf
new file mode 100755
index 00000000..792232f2
--- /dev/null
+++ b/modules/core/test/conf/test39.conf
@@ -0,0 +1,23 @@
+# okay till stop.
+
+<IfModule mod_macro.c>
+ <Macro ModMacro>
+ Warning Thanks for using mod_macro!
+ </Macro>
+</IfModule>
+
+<IfModule !mod_macro.c>
+ <Macro ModMacro>
+ Error Sorry, mod_macro must be installed to run this configuration file.
+ </Macro>
+</IfModule>
+
+Use ModMacro
+
+<Macro foo>
+ Warning "macro foo"
+</Macro>
+
+Use foo
+
+Error "done!"
diff --git a/modules/core/test/conf/test40.conf b/modules/core/test/conf/test40.conf
new file mode 100755
index 00000000..e6b81f7a
--- /dev/null
+++ b/modules/core/test/conf/test40.conf
@@ -0,0 +1,33 @@
+# configuration example with mod_macro
+#
+
+<VirtualHost www.foo.com>
+ DocumentRoot /foo/document/root/directory
+
+ <Macro SubDirAccessControl $subdir>
+ # access control to subdirs...
+ <Location /$subdir>
+ Warning "location /$subdir"
+ </Location>
+ </Macro>
+
+ # repeat uses
+ Use SubDirAccessControl A
+ Use SubDirAccessControl B
+ Use SubDirAccessControl C
+ Use SubDirAccessControl D
+ Use SubDirAccessControl E
+ Use SubDirAccessControl G
+ Use SubDirAccessControl H
+ Use SubDirAccessControl J
+ Use SubDirAccessControl K
+ Use SubDirAccessControl L
+ Use SubDirAccessControl M
+ Use SubDirAccessControl N
+
+ # cleanup
+ UndefMacro SubDirAccessControl
+
+</VirtualHost>
+
+Error Stop configuration file processing.
diff --git a/modules/core/test/conf/test41.conf b/modules/core/test/conf/test41.conf
new file mode 100755
index 00000000..c4e6bdbe
--- /dev/null
+++ b/modules/core/test/conf/test41.conf
@@ -0,0 +1,20 @@
+# another configuration example without mod_macro
+
+<VirtualHost www.foo.com>
+ DocumentRoot /foo/document/root/directory
+
+ <Location /A>
+ Warning "location /A"
+ </Location>
+
+ <Location /B>
+ Warning "location /B"
+ </Location>
+
+ <Location /C>
+ Warning "location /C"
+ </Location>
+
+</VirtualHost>
+
+Error Stop configuration file processing.
diff --git a/modules/core/test/conf/test42.conf b/modules/core/test/conf/test42.conf
new file mode 100755
index 00000000..a1426041
--- /dev/null
+++ b/modules/core/test/conf/test42.conf
@@ -0,0 +1,13 @@
+# multiple macro uses
+
+<Macro foo $p>
+ Warning "macro foo $p"
+</Macro>
+
+Use foo ''
+Use foo ''
+Use foo ''
+Use foo ''
+Use foo ''
+
+Error "done on line 13."
diff --git a/modules/core/test/conf/test43.conf b/modules/core/test/conf/test43.conf
new file mode 100755
index 00000000..264b9167
--- /dev/null
+++ b/modules/core/test/conf/test43.conf
@@ -0,0 +1,29 @@
+# not necessarily nested.
+
+<Macro begindir $dir>
+<Directory $dir>
+# hello
+</Macro>
+
+<Macro enddir>
+</Directory>
+</Macro>
+
+Use begindir /unexpected/1
+Use enddir
+
+
+Use begindir /unexpected/2
+Use enddir
+
+Use begindir /unexpected/3
+<Limit GET>
+</Limit>
+Use enddir
+
+<VirtualHost foo.com>
+Use begindir /unexpected/4
+Use enddir
+</VirtualHost>
+
+Error ok!
diff --git a/modules/core/test/conf/test44.conf b/modules/core/test/conf/test44.conf
new file mode 100755
index 00000000..ef4883eb
--- /dev/null
+++ b/modules/core/test/conf/test44.conf
@@ -0,0 +1,19 @@
+# working recursion...
+
+<Macro foo>
+use bla
+</Macro>
+
+<Macro bla>
+<IfDefine NoFoo>
+use foo
+</IfDefine>
+</Macro>
+
+
+<IfDefine !NoFoo>
+# foo gonna call bla, bla won't call foo back...
+use foo
+</IfDefine>
+
+Error okay.
diff --git a/modules/core/test/conf/test45.conf b/modules/core/test/conf/test45.conf
new file mode 100755
index 00000000..03949358
--- /dev/null
+++ b/modules/core/test/conf/test45.conf
@@ -0,0 +1,7 @@
+# strange chars
+
+<Macro warnings $1 %2 &3 @4 #5 ~6 *7 .8 ,9 !a -b +c =d :e ;f ?g>
+# hello $1 %2 &3 @4 #5 ~6 *7 .8 ,9 !a -b +c =d :e ;f ?g
+</Macro>
+
+Error "done on line 7."
diff --git a/modules/core/test/conf/test46.conf b/modules/core/test/conf/test46.conf
new file mode 100755
index 00000000..50520ed7
--- /dev/null
+++ b/modules/core/test/conf/test46.conf
@@ -0,0 +1,11 @@
+# various working prefixes
+
+<Macro $i %j @k>
+# hello %j @k
+</Macro>
+
+<Macro warnings $i second>
+# not used.
+</Macro>
+
+Error okay.
diff --git a/modules/core/test/conf/test47.conf b/modules/core/test/conf/test47.conf
new file mode 100755
index 00000000..6e736641
--- /dev/null
+++ b/modules/core/test/conf/test47.conf
@@ -0,0 +1,15 @@
+# empty macro contents...
+
+<Macro foo>
+</Macro>
+
+Use foo
+
+<Macro bla $i>
+</Macro>
+
+<Macro bof $i>
+# some contents...
+</Macro>
+
+Error okay.
diff --git a/modules/core/test/conf/test48.conf b/modules/core/test/conf/test48.conf
new file mode 100755
index 00000000..96bf4615
--- /dev/null
+++ b/modules/core/test/conf/test48.conf
@@ -0,0 +1,23 @@
+# test substitution...
+
+<Macro M %premier>
+Warning %premier
+</Macro>
+
+Use M 1
+Use M 12
+Use M 123
+Use M 1234
+Use M 12345
+Use M 123456
+Use M 1234567
+Use M 12345678
+Use M 123456789
+Use M 1234567890
+Use M 1234567890a
+Use M 1234567890ab
+Use M 1234567890abc
+Use M 1234567890abcd
+Use M 1234567890abcde
+
+Error "done line 23."
diff --git a/modules/core/test/conf/test49.conf b/modules/core/test/conf/test49.conf
new file mode 100644
index 00000000..7a21c823
--- /dev/null
+++ b/modules/core/test/conf/test49.conf
@@ -0,0 +1,2 @@
+# undef macro before anything
+UndefMacro foo
diff --git a/modules/core/test/conf/test50.conf b/modules/core/test/conf/test50.conf
new file mode 100644
index 00000000..33dd359f
--- /dev/null
+++ b/modules/core/test/conf/test50.conf
@@ -0,0 +1,5 @@
+# undef non existing macro
+<Macro foo>
+ Warning "foo macro"
+</Macro>
+UndefMacro bla
diff --git a/modules/core/test/conf/test51.conf b/modules/core/test/conf/test51.conf
new file mode 100644
index 00000000..50214fa3
--- /dev/null
+++ b/modules/core/test/conf/test51.conf
@@ -0,0 +1,9 @@
+# undef existing macro, and try to use it
+<Macro foo>
+ Warning "foo macro contents"
+</Macro>
+# expanded, but will not be processed because of error
+Use foo
+UndefMacro foo
+# error, does not exist anymore
+Use foo
diff --git a/modules/core/test/conf/test52.conf b/modules/core/test/conf/test52.conf
new file mode 100644
index 00000000..bb77c737
--- /dev/null
+++ b/modules/core/test/conf/test52.conf
@@ -0,0 +1,8 @@
+# undef existing macro, and try to use it
+<Macro foo>
+ Warning "foo macro contents line 1"
+</Macro>
+Use foo
+UndefMacro foo
+
+Error "done line 8."
diff --git a/modules/core/test/conf/test53.conf b/modules/core/test/conf/test53.conf
new file mode 100755
index 00000000..08e8c988
--- /dev/null
+++ b/modules/core/test/conf/test53.conf
@@ -0,0 +1,2 @@
+# use undefined macro without prior definition
+Use bla
diff --git a/modules/core/test/conf/test54.conf b/modules/core/test/conf/test54.conf
new file mode 100644
index 00000000..7dd30aca
--- /dev/null
+++ b/modules/core/test/conf/test54.conf
@@ -0,0 +1,6 @@
+# empty macro
+<Macro foo>
+</Macro>
+Use foo
+
+Error "done line 6."
diff --git a/modules/core/test/conf/test55.conf b/modules/core/test/conf/test55.conf
new file mode 100644
index 00000000..bd978e98
--- /dev/null
+++ b/modules/core/test/conf/test55.conf
@@ -0,0 +1,11 @@
+# line numbers...
+<Macro foo $where>
+ Warning "macro foo(:2) line 1 ($where)"
+</Macro>
+<Macro bla $where>
+ Warning "macro bla(:5) line 1 ($where)"
+ Use foo "bla line 2"
+</Macro>
+Use foo "file line 9"
+Use bla "file line 10"
+Error "done line 11."
diff --git a/modules/core/test/conf/test56.conf b/modules/core/test/conf/test56.conf
new file mode 100644
index 00000000..b7366a28
--- /dev/null
+++ b/modules/core/test/conf/test56.conf
@@ -0,0 +1,18 @@
+# nesting warnings
+<Macro Open $dir>
+ <Directory $dir>
+ Warning "Open:2 $dir"
+</Macro>
+<Macro Close>
+ Warning "Close:1"
+ </Directory>
+</Macro>
+
+# some uses
+Use Open /tmp
+Use Close
+
+Use Open /etc
+Use Close
+
+Error "done line 18."
diff --git a/modules/core/test/conf/test57.conf b/modules/core/test/conf/test57.conf
new file mode 100644
index 00000000..7c36868b
--- /dev/null
+++ b/modules/core/test/conf/test57.conf
@@ -0,0 +1,4 @@
+# empty argument name
+<Macro foo $x ''>
+ Warning "macro foo line 1"
+</Macro>
diff --git a/modules/core/test/conf/test58.conf b/modules/core/test/conf/test58.conf
new file mode 100644
index 00000000..6c8a2eb7
--- /dev/null
+++ b/modules/core/test/conf/test58.conf
@@ -0,0 +1,4 @@
+# bad directive closing
+<Macro foo
+ Warning "macro foo line 1"
+</Macro>
diff --git a/modules/core/test/conf/test59.conf b/modules/core/test/conf/test59.conf
new file mode 100644
index 00000000..9f43d7d3
--- /dev/null
+++ b/modules/core/test/conf/test59.conf
@@ -0,0 +1,4 @@
+# empty name
+<Macro ''>
+ Warning "empty quoted name macro"
+</Macro>
diff --git a/modules/core/test/conf/test60.conf b/modules/core/test/conf/test60.conf
new file mode 100644
index 00000000..969a4ebe
--- /dev/null
+++ b/modules/core/test/conf/test60.conf
@@ -0,0 +1,17 @@
+# @ escaping
+<Macro Foo $one $two>
+ Warning "macro Foo arg 1: $one"
+ Warning "macro Foo arg 2: $two"
+</Macro>
+<Macro Bla @first @second>
+ Warning Macro Bla arg 1: @first
+ Warning Macro Bla arg 2: @second
+ Use Foo @first 'second'
+ Use Foo 'first' @second
+ Use Foo @first @second
+</Macro>
+
+Use Foo hello world
+Use Bla "hello world" "thank you"
+
+Error "done on line 17."
diff --git a/modules/core/test/conf/test61.conf b/modules/core/test/conf/test61.conf
new file mode 100644
index 00000000..cd285978
--- /dev/null
+++ b/modules/core/test/conf/test61.conf
@@ -0,0 +1,18 @@
+# deep expansion
+<Macro F1 $x>
+ Warning "F1:1 x=$x"
+</Macro>
+<Macro F2 $x>
+ Warning "F2:1 x=$x"
+ Use F1 $x
+</Macro>
+<Macro F3 $x>
+ Warning "F3:1 x=$x"
+ Use F2 $x
+</Macro>
+<Macro F4 $x>
+ Warning "F4:1 x=$x"
+ Use F3 $x
+</Macro>
+Use F4 "line=17"
+Error "done line 18."
diff --git a/modules/core/test/conf/test62.conf b/modules/core/test/conf/test62.conf
new file mode 100644
index 00000000..9d611de5
--- /dev/null
+++ b/modules/core/test/conf/test62.conf
@@ -0,0 +1,25 @@
+# test continuations
+<Macro Line \
+ $start \
+ $stop>
+ Warning \
+ "Line:1-2 start at $start"
+ Warning \
+ "Line:3-4 stop at $stop"
+</Macro>
+
+Use Line 11 11
+Use Line \
+ 12 13
+Use Line \
+ 14 \
+ 16
+Use Line 17 \
+ 18
+Use Line \
+ \
+ 19 \
+ \
+ 23
+
+Error "done line 25."
diff --git a/modules/core/test/conf/test63.conf b/modules/core/test/conf/test63.conf
new file mode 100644
index 00000000..7988ae44
--- /dev/null
+++ b/modules/core/test/conf/test63.conf
@@ -0,0 +1,9 @@
+# include
+include ${PWD}/inc63_1.conf
+Use Foo "test63.conf:3"
+<Macro Bla $where>
+ Warning "Bla at $where"
+</Macro>
+include ${PWD}/inc63_2.conf
+Use Bla "test63.conf:8"
+Error "done at line 9."
diff --git a/modules/core/test/conf/test64.conf b/modules/core/test/conf/test64.conf
new file mode 100644
index 00000000..6c123281
--- /dev/null
+++ b/modules/core/test/conf/test64.conf
@@ -0,0 +1,5 @@
+# just continuations
+Warning "on line 2"
+Warning \
+ "from line 3 to line 4"
+Error "done on line 5."
diff --git a/modules/core/test/conf/test65.conf b/modules/core/test/conf/test65.conf
new file mode 100644
index 00000000..df9adc32
--- /dev/null
+++ b/modules/core/test/conf/test65.conf
@@ -0,0 +1,11 @@
+# simple use continuation
+<Macro Line $line>
+ # first macro line is a comment
+ Warning "Line: $line"
+</Macro>
+Use Line \
+ "on line 6-7"
+Use \
+ Line \
+ "on line 8-10"
+Error "done on line 11."
diff --git a/modules/core/test/conf/test66.conf b/modules/core/test/conf/test66.conf
new file mode 100644
index 00000000..a14e5878
--- /dev/null
+++ b/modules/core/test/conf/test66.conf
@@ -0,0 +1,7 @@
+# no double substitution
+<Macro Foo $x $y>
+ Warning "Foo: x=$x y=$y"
+</Macro>
+Use Foo X Y
+Use Foo "$y" "$x"
+Error "done on line 7."
diff --git a/modules/core/test/conf/test67.conf b/modules/core/test/conf/test67.conf
new file mode 100644
index 00000000..04a5d3d9
--- /dev/null
+++ b/modules/core/test/conf/test67.conf
@@ -0,0 +1 @@
+Error "done at line 1 without LF." \ No newline at end of file
diff --git a/modules/core/test/conf/test68.conf b/modules/core/test/conf/test68.conf
new file mode 100644
index 00000000..2a7b85bd
--- /dev/null
+++ b/modules/core/test/conf/test68.conf
@@ -0,0 +1,5 @@
+# two directives with continuations & no eol at eof
+Warning \
+ "line 2-3"
+Error \
+ "done on line 4-5." \ No newline at end of file
diff --git a/modules/core/test/conf/test69.conf b/modules/core/test/conf/test69.conf
new file mode 100644
index 00000000..11a08300
--- /dev/null
+++ b/modules/core/test/conf/test69.conf
@@ -0,0 +1,14 @@
+# warn if ignored non-blank stuff after closing '>'
+<Macro Foo> this stuff is ignored...
+ Warning "Foo"
+</Macro> this stuff is ignored as well...
+Use Foo
+<Macro Bla>
+ Warning "Bla"
+</Macro>
+Use Bla
+<Macro Comments> # comments are fine
+ Warning "Comments"
+</Macro> # comments are fine
+Use Comments
+Error "done on line 14."
diff --git a/modules/core/test/ref/test01.out b/modules/core/test/ref/test01.out
new file mode 100644
index 00000000..9ea66657
--- /dev/null
+++ b/modules/core/test/ref/test01.out
@@ -0,0 +1,3 @@
+# testing with conf/test01.conf
+httpd: Syntax error on line 2 of ./conf/test01.conf: <Macro macro definition: empty name
+# exit: 1
diff --git a/modules/core/test/ref/test02.out b/modules/core/test/ref/test02.out
new file mode 100644
index 00000000..6b491913
--- /dev/null
+++ b/modules/core/test/ref/test02.out
@@ -0,0 +1,3 @@
+# testing with conf/test02.conf
+httpd: Syntax error on line 2 of ./conf/test02.conf: <Macro macro definition: empty name
+# exit: 1
diff --git a/modules/core/test/ref/test03.out b/modules/core/test/ref/test03.out
new file mode 100644
index 00000000..f8579012
--- /dev/null
+++ b/modules/core/test/ref/test03.out
@@ -0,0 +1,3 @@
+# testing with conf/test03.conf
+httpd: Syntax error on line 5 of ./conf/test03.conf: macro 'bla' undefined
+# exit: 1
diff --git a/modules/core/test/ref/test04.out b/modules/core/test/ref/test04.out
new file mode 100644
index 00000000..aaa2e6bc
--- /dev/null
+++ b/modules/core/test/ref/test04.out
@@ -0,0 +1,3 @@
+# testing with conf/test04.conf
+httpd: Syntax error on line 5 of ./conf/test04.conf: macro 'foo' (defined on line 2 of "./conf/test04.conf") used with 1 arguments instead of 0
+# exit: 1
diff --git a/modules/core/test/ref/test05.out b/modules/core/test/ref/test05.out
new file mode 100644
index 00000000..184c40cd
--- /dev/null
+++ b/modules/core/test/ref/test05.out
@@ -0,0 +1,3 @@
+# testing with conf/test05.conf
+httpd: Syntax error on line 5 of ./conf/test05.conf: macro 'foo' (defined on line 2 of "./conf/test05.conf") used with 0 arguments instead of 1
+# exit: 1
diff --git a/modules/core/test/ref/test06.out b/modules/core/test/ref/test06.out
new file mode 100644
index 00000000..221dd057
--- /dev/null
+++ b/modules/core/test/ref/test06.out
@@ -0,0 +1,3 @@
+# testing with conf/test06.conf
+httpd: Syntax error on line 5 of ./conf/test06.conf: macro 'foo' (defined on line 2 of "./conf/test06.conf") used with 2 arguments instead of 1
+# exit: 1
diff --git a/modules/core/test/ref/test07.out b/modules/core/test/ref/test07.out
new file mode 100644
index 00000000..60003ecb
--- /dev/null
+++ b/modules/core/test/ref/test07.out
@@ -0,0 +1,3 @@
+# testing with conf/test07.conf
+httpd: Syntax error on line 2 of ./conf/test07.conf: macro 'foo' (defined on line 2 of "./conf/test07.conf")\n\tcontents error: expected token not found: </Macro>
+# exit: 1
diff --git a/modules/core/test/ref/test08.out b/modules/core/test/ref/test08.out
new file mode 100644
index 00000000..124c7a09
--- /dev/null
+++ b/modules/core/test/ref/test08.out
@@ -0,0 +1,3 @@
+# testing with conf/test08.conf
+httpd: Syntax error on line 3 of ./conf/test08.conf: </Macro> without matching <Macro> section
+# exit: 1
diff --git a/modules/core/test/ref/test09.out b/modules/core/test/ref/test09.out
new file mode 100644
index 00000000..9af12255
--- /dev/null
+++ b/modules/core/test/ref/test09.out
@@ -0,0 +1,3 @@
+# testing with conf/test09.conf
+httpd: Syntax error on line 1 of macro 'foo' (defined on line 2 of "./conf/test09.conf") used on line 6 of "./conf/test09.conf": recursive use of macro 'foo' is invalid
+# exit: 1
diff --git a/modules/core/test/ref/test10.out b/modules/core/test/ref/test10.out
new file mode 100644
index 00000000..4d81abcf
--- /dev/null
+++ b/modules/core/test/ref/test10.out
@@ -0,0 +1,3 @@
+# testing with conf/test10.conf
+httpd: Syntax error on line 1 of macro 'bla' (defined on line 6 of "./conf/test10.conf") used on line 1 of "macro 'foo' (defined on line 2 of "./conf/test10.conf") used on line 10 of "./conf/test10.conf"": recursive use of macro 'foo' is invalid
+# exit: 1
diff --git a/modules/core/test/ref/test11.out b/modules/core/test/ref/test11.out
new file mode 100644
index 00000000..d18c95eb
--- /dev/null
+++ b/modules/core/test/ref/test11.out
@@ -0,0 +1,6 @@
+# testing with conf/test11.conf
+[core:warn] macro one.in line 1 on line 1 of macro 'one.in' (defined on line 1 of "macro 'foo' (defined on line 2 of "./conf/test11.conf") used on line 9 of "./conf/test11.conf"") used on line 12 of "./conf/test11.conf"
+[core:error] done line 15. on line 15 of ./conf/test11.conf
+AH00526: Syntax error on line 15 of ./conf/test11.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test12.out b/modules/core/test/ref/test12.out
new file mode 100644
index 00000000..b1ab2348
--- /dev/null
+++ b/modules/core/test/ref/test12.out
@@ -0,0 +1,7 @@
+# testing with conf/test12.conf
+[macro:warn] macro 'foo' multiply defined: defined on line 2 of "./conf/test12.conf", redefined on line 6 of "./conf/test12.conf"
+[core:warn] macro foo 2, line 1 on line 1 of macro 'foo' (defined on line 6 of "./conf/test12.conf") used on line 10 of "./conf/test12.conf"
+[core:error] done line 12. on line 12 of ./conf/test12.conf
+AH00526: Syntax error on line 12 of ./conf/test12.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test13.out b/modules/core/test/ref/test13.out
new file mode 100644
index 00000000..13d501e3
--- /dev/null
+++ b/modules/core/test/ref/test13.out
@@ -0,0 +1,8 @@
+# testing with conf/test13.conf
+[macro:warn] macro 'foo' multiply defined: defined on line 2 of "./conf/test13.conf", redefined on line 12 of "./conf/test13.conf"
+[core:warn] macro FOO line 1 on line 1 of macro 'foo' (defined on line 2 of "./conf/test13.conf") used on line 10 of "./conf/test13.conf"
+[core:warn] redefined macro foo line 1 on line 1 of macro 'foo' (defined on line 12 of "./conf/test13.conf") used on line 16 of "./conf/test13.conf"
+[core:error] done line 18. on line 18 of ./conf/test13.conf
+AH00526: Syntax error on line 18 of ./conf/test13.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test14.out b/modules/core/test/ref/test14.out
new file mode 100644
index 00000000..16507157
--- /dev/null
+++ b/modules/core/test/ref/test14.out
@@ -0,0 +1,14 @@
+# testing with conf/test14.conf
+AH00112: Warning: DocumentRoot [/projects/apache/web] does not exist
+[core:warn] directory /projects/apache/web on line 5 of macro 'myvirtualhost' (defined on line 3 of "./conf/test14.conf") used on line 17 of "./conf/test14.conf"
+[core:warn] directory /projects/apache/web/intranet on line 8 of macro 'myvirtualhost' (defined on line 3 of "./conf/test14.conf") used on line 17 of "./conf/test14.conf"
+AH00112: Warning: DocumentRoot [/projects/perl/web] does not exist
+[core:warn] directory /projects/perl/web on line 5 of macro 'myvirtualhost' (defined on line 3 of "./conf/test14.conf") used on line 19 of "./conf/test14.conf"
+[core:warn] directory /projects/perl/web/intranet on line 8 of macro 'myvirtualhost' (defined on line 3 of "./conf/test14.conf") used on line 19 of "./conf/test14.conf"
+AH00112: Warning: DocumentRoot [/projects/mines/web] does not exist
+[core:warn] directory /projects/mines/web on line 5 of macro 'myvirtualhost' (defined on line 3 of "./conf/test14.conf") used on line 21 of "./conf/test14.conf"
+[core:warn] directory /projects/mines/web/intranet on line 8 of macro 'myvirtualhost' (defined on line 3 of "./conf/test14.conf") used on line 21 of "./conf/test14.conf"
+[core:error] done line 23. on line 23 of ./conf/test14.conf
+AH00526: Syntax error on line 23 of ./conf/test14.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test15.out b/modules/core/test/ref/test15.out
new file mode 100644
index 00000000..b0b82b7b
--- /dev/null
+++ b/modules/core/test/ref/test15.out
@@ -0,0 +1,6 @@
+# testing with conf/test15.conf
+[macro:warn] bad cumulated nesting (+1) in macro 'test' (defined on line 2 of "./conf/test15.conf")
+[core:error] should not reach this point. on line 9 of ./conf/test15.conf
+AH00526: Syntax error on line 9 of ./conf/test15.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test16.out b/modules/core/test/ref/test16.out
new file mode 100644
index 00000000..6e0f9cad
--- /dev/null
+++ b/modules/core/test/ref/test16.out
@@ -0,0 +1,5 @@
+# testing with conf/test16.conf
+[macro:warn] bad (negative) nesting on line 2 of macro 'foo' (defined on line 3 of "./conf/test16.conf")
+[macro:warn] bad cumulated nesting (-1) in macro 'foo' (defined on line 3 of "./conf/test16.conf")
+httpd: Syntax error on line 9 of ./conf/test16.conf: </Limit> without matching <Limit> section
+# exit: 1
diff --git a/modules/core/test/ref/test17.out b/modules/core/test/ref/test17.out
new file mode 100644
index 00000000..c6ca16d0
--- /dev/null
+++ b/modules/core/test/ref/test17.out
@@ -0,0 +1,7 @@
+# testing with conf/test17.conf
+[macro:warn] bad (negative) nesting on line 2 of macro 'foo' (defined on line 3 of "./conf/test17.conf")
+[macro:warn] bad cumulated nesting (-1) in macro 'foo' (defined on line 3 of "./conf/test17.conf")
+[core:error] done on line 10. on line 10 of ./conf/test17.conf
+AH00526: Syntax error on line 10 of ./conf/test17.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test18.out b/modules/core/test/ref/test18.out
new file mode 100644
index 00000000..c5cee819
--- /dev/null
+++ b/modules/core/test/ref/test18.out
@@ -0,0 +1,7 @@
+# testing with conf/test18.conf
+[macro:warn] bad (negative) nesting on line 2 of macro 'foo' (defined on line 3 of "./conf/test18.conf")
+[macro:warn] bad cumulated nesting (-1) in macro 'foo' (defined on line 3 of "./conf/test18.conf")
+[core:error] done on line 10. on line 10 of ./conf/test18.conf
+AH00526: Syntax error on line 10 of ./conf/test18.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test19.out b/modules/core/test/ref/test19.out
new file mode 100644
index 00000000..411e5694
--- /dev/null
+++ b/modules/core/test/ref/test19.out
@@ -0,0 +1,9 @@
+# testing with conf/test19.conf
+[core:warn] macro foo line 2 in Directory on line 1 of macro 'foo' (defined on line 3 of "./conf/test19.conf") used on line 9 of "./conf/test19.conf"
+[core:warn] macro foo line 2 in Location on line 1 of macro 'foo' (defined on line 3 of "./conf/test19.conf") used on line 13 of "./conf/test19.conf"
+[core:warn] macro foo line 2 in VirtualHost on line 1 of macro 'foo' (defined on line 3 of "./conf/test19.conf") used on line 17 of "./conf/test19.conf"
+[core:warn] macro foo line 2 in VirtualHost & Directory on line 1 of macro 'foo' (defined on line 3 of "./conf/test19.conf") used on line 22 of "./conf/test19.conf"
+[core:error] done line 26. on line 26 of ./conf/test19.conf
+AH00526: Syntax error on line 26 of ./conf/test19.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test20.out b/modules/core/test/ref/test20.out
new file mode 100644
index 00000000..3ce2b607
--- /dev/null
+++ b/modules/core/test/ref/test20.out
@@ -0,0 +1,4 @@
+# testing with conf/test20.conf
+AH00526: Syntax error on line 1 of macro 'foo' (defined on line 3 of "./conf/test20.conf") used on line 10 of "./conf/test20.conf":
+<Directory not allowed here
+# exit: 1
diff --git a/modules/core/test/ref/test21.out b/modules/core/test/ref/test21.out
new file mode 100644
index 00000000..ac8d843b
--- /dev/null
+++ b/modules/core/test/ref/test21.out
@@ -0,0 +1,5 @@
+# testing with conf/test21.conf
+[core:error] macro foo dir /tmp on line 2 of macro 'foo' (defined on line 3 of "./conf/test21.conf") used on line 10 of "./conf/test21.conf"
+AH00526: Syntax error on line 2 of macro 'foo' (defined on line 3 of "./conf/test21.conf") used on line 10 of "./conf/test21.conf":
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test22.out b/modules/core/test/ref/test22.out
new file mode 100644
index 00000000..def17dd3
--- /dev/null
+++ b/modules/core/test/ref/test22.out
@@ -0,0 +1,6 @@
+# testing with conf/test22.conf
+[core:warn] macro foo on line 2 of macro 'foo' (defined on line 3 of "./conf/test22.conf") used on line 9 of "./conf/test22.conf"
+[core:error] done on line 11. on line 11 of ./conf/test22.conf
+AH00526: Syntax error on line 11 of ./conf/test22.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test23.out b/modules/core/test/ref/test23.out
new file mode 100644
index 00000000..827c861a
--- /dev/null
+++ b/modules/core/test/ref/test23.out
@@ -0,0 +1,7 @@
+# testing with conf/test23.conf
+[core:warn] macro foo in /tmp on line 1 of macro 'foo' (defined on line 4 of "./conf/test23.conf") used on line 9 of "./conf/test23.conf"
+[core:warn] macro foo in /tmp on line 1 of macro 'foo' (defined on line 4 of "./conf/test23.conf") used on line 12 of "./conf/test23.conf"
+[core:error] done! on line 15 of ./conf/test23.conf
+AH00526: Syntax error on line 15 of ./conf/test23.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test24.out b/modules/core/test/ref/test24.out
new file mode 100644
index 00000000..e5d370dc
--- /dev/null
+++ b/modules/core/test/ref/test24.out
@@ -0,0 +1,8 @@
+# testing with conf/test24.conf
+[core:warn] macro bla intra on line 2 of macro 'bla' (defined on line 3 of "./conf/test24.conf") used on line 13 of "./conf/test24.conf"
+[core:warn] macro bla private on line 5 of macro 'bla' (defined on line 3 of "./conf/test24.conf") used on line 13 of "./conf/test24.conf"
+[core:warn] macro bla intra on line 2 of macro 'bla' (defined on line 3 of "./conf/test24.conf") used on line 17 of "./conf/test24.conf"
+[core:warn] macro bla private on line 5 of macro 'bla' (defined on line 3 of "./conf/test24.conf") used on line 17 of "./conf/test24.conf"
+AH00526: Syntax error on line 1 of macro 'bla' (defined on line 3 of "./conf/test24.conf") used on line 22 of "./conf/test24.conf":
+<Location not allowed here
+# exit: 1
diff --git a/modules/core/test/ref/test25.out b/modules/core/test/ref/test25.out
new file mode 100644
index 00000000..56bb0bd3
--- /dev/null
+++ b/modules/core/test/ref/test25.out
@@ -0,0 +1,9 @@
+# testing with conf/test25.conf
+[core:warn] restricted access policy 10.0.0.0/8 on line 1 of macro 'restrictedaccesspolicy' (defined on line 3 of "./conf/test25.conf") used on line 8 of "./conf/test25.conf"
+[core:warn] restricted access policy 192.54.172.0/24 192.54.148.0/24 10.0.0.0/8 on line 1 of macro 'restrictedaccesspolicy' (defined on line 3 of "./conf/test25.conf") used on line 16 of "./conf/test25.conf"
+[core:warn] restricted access policy 10.0.0.0/8 on line 1 of macro 'restrictedaccesspolicy' (defined on line 3 of "./conf/test25.conf") used on line 1 of "macro 'localaccessonly' (defined on line 11 of "./conf/test25.conf") used on line 20 of "./conf/test25.conf""
+[core:warn] restricted access policy 10.0.0.0/8 on line 1 of macro 'restrictedaccesspolicy' (defined on line 3 of "./conf/test25.conf") used on line 1 of "macro 'localaccessonly' (defined on line 11 of "./conf/test25.conf") used on line 24 of "./conf/test25.conf""
+[core:error] done line 27. on line 27 of ./conf/test25.conf
+AH00526: Syntax error on line 27 of ./conf/test25.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test26.out b/modules/core/test/ref/test26.out
new file mode 100644
index 00000000..28cba28d
--- /dev/null
+++ b/modules/core/test/ref/test26.out
@@ -0,0 +1,11 @@
+# testing with conf/test26.conf
+[macro:warn] macro 'funny' (defined on line 4 of "./conf/test26.conf") argument name 'first arg' (#1) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'funny' (defined on line 4 of "./conf/test26.conf") argument name 'second ... arg' (#2) without expected prefix, better prefix argument names with one of '$%@'.
+[core:warn] funny directory on line 2 of macro 'funny' (defined on line 4 of "./conf/test26.conf") used on line 13 of "./conf/test26.conf"
+[core:warn] funny location on line 5 of macro 'funny' (defined on line 4 of "./conf/test26.conf") used on line 13 of "./conf/test26.conf"
+[core:warn] funny directory on line 2 of macro 'funny' (defined on line 4 of "./conf/test26.conf") used on line 16 of "./conf/test26.conf"
+[core:warn] funny location on line 5 of macro 'funny' (defined on line 4 of "./conf/test26.conf") used on line 16 of "./conf/test26.conf"
+[core:error] done! on line 19 of ./conf/test26.conf
+AH00526: Syntax error on line 19 of ./conf/test26.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test27.out b/modules/core/test/ref/test27.out
new file mode 100644
index 00000000..46aa8ea6
--- /dev/null
+++ b/modules/core/test/ref/test27.out
@@ -0,0 +1,8 @@
+# testing with conf/test27.conf
+[core:warn] foo.one /unexpected/1 on line 2 of macro 'foo.one' (defined on line 1 of "macro 'foo' (defined on line 3 of "./conf/test27.conf") used on line 11 of "./conf/test27.conf"") used on line 14 of "./conf/test27.conf"
+[core:warn] foo.two /unexpected/2 on line 2 of macro 'foo.two' (defined on line 1 of "macro 'foo' (defined on line 3 of "./conf/test27.conf") used on line 12 of "./conf/test27.conf"") used on line 15 of "./conf/test27.conf"
+[core:warn] foo.one /unexpected/1 on line 2 of macro 'foo.one' (defined on line 1 of "macro 'foo' (defined on line 3 of "./conf/test27.conf") used on line 11 of "./conf/test27.conf"") used on line 16 of "./conf/test27.conf"
+[core:error] done! on line 22 of ./conf/test27.conf
+AH00526: Syntax error on line 22 of ./conf/test27.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test28.out b/modules/core/test/ref/test28.out
new file mode 100644
index 00000000..eb0a23f3
--- /dev/null
+++ b/modules/core/test/ref/test28.out
@@ -0,0 +1,6 @@
+# testing with conf/test28.conf
+[core:warn] macro foo on line 1 of macro 'foo' (defined on line 4 of "./conf/test28.conf") used on line 8 of "./conf/test28.conf"
+[core:error] done! on line 10 of ./conf/test28.conf
+AH00526: Syntax error on line 10 of ./conf/test28.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test29.out b/modules/core/test/ref/test29.out
new file mode 100644
index 00000000..48488f9b
--- /dev/null
+++ b/modules/core/test/ref/test29.out
@@ -0,0 +1,4 @@
+# testing with conf/test29.conf
+[macro:warn] macro 'toobigaline' (defined on line 3 of "./conf/test29.conf") argument name 'a' (#1) without expected prefix, better prefix argument names with one of '$%@'.
+httpd: Syntax error on line 8 of ./conf/test29.conf: macro 'toobigaline' (defined on line 3 of "./conf/test29.conf") used on line 8 of "./conf/test29.conf" error while substituting: while processing line 1 of macro 'toobigaline' (defined on line 3 of "./conf/test29.conf") cannot substitute, buffer size too small
+# exit: 1
diff --git a/modules/core/test/ref/test30.out b/modules/core/test/ref/test30.out
new file mode 100644
index 00000000..a84c69f4
--- /dev/null
+++ b/modules/core/test/ref/test30.out
@@ -0,0 +1,7 @@
+# testing with conf/test30.conf
+[macro:warn] macro 'foo' (defined on line 4 of "./conf/test30.conf"): argument name prefix conflict ($dir #1 and $directive #2), be careful about your macro definition!
+[core:warn] section Directory /unexpected/1 on line 2 of macro 'foo' (defined on line 4 of "./conf/test30.conf") used on line 10 of "./conf/test30.conf"
+[core:error] done! on line 12 of ./conf/test30.conf
+AH00526: Syntax error on line 12 of ./conf/test30.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test31.out b/modules/core/test/ref/test31.out
new file mode 100644
index 00000000..831a7841
--- /dev/null
+++ b/modules/core/test/ref/test31.out
@@ -0,0 +1,23 @@
+# testing with conf/test31.conf
+[macro:warn] macro 'bla' (defined on line 3 of "./conf/test31.conf"): argument name prefix conflict ($dir #1 and $di #2), be careful about your macro definition!
+[macro:warn] macro 'bla' (defined on line 3 of "./conf/test31.conf"): argument name prefix conflict ($dir #1 and $d #4), be careful about your macro definition!
+[macro:warn] macro 'bla' (defined on line 3 of "./conf/test31.conf"): argument name prefix conflict ($di #2 and $d #4), be careful about your macro definition!
+[macro:warn] macro 'bla' (defined on line 3 of "./conf/test31.conf"): argument name prefix conflict ($dd #3 and $d #4), be careful about your macro definition!
+[macro:warn] macro 'bla' (defined on line 3 of "./conf/test31.conf") used on line 8 of "./conf/test31.conf": empty argument #1
+[macro:warn] macro 'bla' (defined on line 3 of "./conf/test31.conf") used on line 8 of "./conf/test31.conf": empty argument #2
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf"): argument name prefix conflict ($d #1 and $di #2), be careful about your macro definition!
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf"): argument name prefix conflict ($d #1 and $dir #3), be careful about your macro definition!
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf"): argument name prefix conflict ($d #1 and $dd #4), be careful about your macro definition!
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf"): argument name prefix conflict ($di #2 and $dir #3), be careful about your macro definition!
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf"): argument '$d' (#1) never used
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf"): argument '$di' (#2) never used
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf"): argument '$dir' (#3) never used
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf"): argument '$dd' (#4) never used
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf") used on line 14 of "./conf/test31.conf": empty argument #1
+[macro:warn] macro 'foo' (defined on line 10 of "./conf/test31.conf") used on line 14 of "./conf/test31.conf": empty argument #2
+[core:warn] argument name conflicts on line 1 of macro 'bla' (defined on line 3 of "./conf/test31.conf") used on line 8 of "./conf/test31.conf"
+[core:warn] conflicts, but arguments are not used on line 1 of macro 'foo' (defined on line 10 of "./conf/test31.conf") used on line 14 of "./conf/test31.conf"
+[core:error] done on line 16. on line 16 of ./conf/test31.conf
+AH00526: Syntax error on line 16 of ./conf/test31.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test32.out b/modules/core/test/ref/test32.out
new file mode 100644
index 00000000..b1620afb
--- /dev/null
+++ b/modules/core/test/ref/test32.out
@@ -0,0 +1,3 @@
+# testing with conf/test32.conf
+httpd: Syntax error on line 3 of ./conf/test32.conf: argument name conflict in macro 'foo' (defined on line 3 of "./conf/test32.conf"): argument '$arg2': #2 and #4, change argument names!
+# exit: 1
diff --git a/modules/core/test/ref/test33.out b/modules/core/test/ref/test33.out
new file mode 100644
index 00000000..cc5b3d09
--- /dev/null
+++ b/modules/core/test/ref/test33.out
@@ -0,0 +1,3 @@
+# testing with conf/test33.conf
+httpd: Syntax error on line 3 of ./conf/test33.conf: no macro defined before Use
+# exit: 1
diff --git a/modules/core/test/ref/test34.out b/modules/core/test/ref/test34.out
new file mode 100644
index 00000000..1264a546
--- /dev/null
+++ b/modules/core/test/ref/test34.out
@@ -0,0 +1,13 @@
+# testing with conf/test34.conf
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test34.conf"): argument name prefix conflict ($d #1 and $dd #2), be careful about your macro definition!
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test34.conf"): argument '$d' (#1) never used
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test34.conf"): argument '$dd' (#2) never used
+[macro:warn] macro 'bla' (defined on line 7 of "./conf/test34.conf"): argument name prefix conflict ($dd #1 and $d #2), be careful about your macro definition!
+[macro:warn] macro 'bla' (defined on line 7 of "./conf/test34.conf"): argument '$dd' (#1) never used
+[macro:warn] macro 'bla' (defined on line 7 of "./conf/test34.conf"): argument '$d' (#2) never used
+[core:warn] macro foo conflict one on line 1 of macro 'foo' (defined on line 3 of "./conf/test34.conf") used on line 11 of "./conf/test34.conf"
+[core:warn] macro bla conflict two on line 1 of macro 'bla' (defined on line 7 of "./conf/test34.conf") used on line 12 of "./conf/test34.conf"
+[core:error] done on line 14. on line 14 of ./conf/test34.conf
+AH00526: Syntax error on line 14 of ./conf/test34.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test35.out b/modules/core/test/ref/test35.out
new file mode 100644
index 00000000..fb562383
--- /dev/null
+++ b/modules/core/test/ref/test35.out
@@ -0,0 +1,13 @@
+# testing with conf/test35.conf
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test35.conf") argument name 'u1' (#1) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test35.conf") argument name 'u2' (#2) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test35.conf") argument name 'n1' (#3) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test35.conf") argument name 'n2' (#4) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test35.conf") argument name 'u3' (#5) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test35.conf"): argument 'n1' (#3) never used
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test35.conf"): argument 'n2' (#4) never used
+[core:warn] macro cannot be used just within a comment 1 2 5 on line 1 of macro 'warnings' (defined on line 3 of "./conf/test35.conf") used on line 8 of "./conf/test35.conf"
+[core:error] done on line 10. on line 10 of ./conf/test35.conf
+AH00526: Syntax error on line 10 of ./conf/test35.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test36.out b/modules/core/test/ref/test36.out
new file mode 100644
index 00000000..4c627d7d
--- /dev/null
+++ b/modules/core/test/ref/test36.out
@@ -0,0 +1,20 @@
+# testing with conf/test36.conf
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf"): argument name prefix conflict ($u #1 and $u1 #3), be careful about your macro definition!
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf"): argument name prefix conflict ($u #1 and $u2 #5), be careful about your macro definition!
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf"): argument name prefix conflict ($n #2 and $n1 #4), be careful about your macro definition!
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf"): argument name prefix conflict ($n #2 and $n2 #6), be careful about your macro definition!
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf"): argument '$n' (#2) never used
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf"): argument '$n1' (#4) never used
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf"): argument '$n2' (#6) never used
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf") used on line 10 of "./conf/test36.conf": empty argument #1
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf") used on line 10 of "./conf/test36.conf": empty argument #2
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf") used on line 10 of "./conf/test36.conf": empty argument #3
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf") used on line 10 of "./conf/test36.conf": empty argument #4
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf") used on line 10 of "./conf/test36.conf": empty argument #5
+[macro:warn] macro 'warnings' (defined on line 1 of "./conf/test36.conf") used on line 10 of "./conf/test36.conf": empty argument #6
+[core:warn] many warnings! 1 3 5 on line 1 of macro 'warnings' (defined on line 1 of "./conf/test36.conf") used on line 7 of "./conf/test36.conf"
+[core:warn] many warnings! on line 1 of macro 'warnings' (defined on line 1 of "./conf/test36.conf") used on line 10 of "./conf/test36.conf"
+[core:error] done! on line 12 of ./conf/test36.conf
+AH00526: Syntax error on line 12 of ./conf/test36.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test37.out b/modules/core/test/ref/test37.out
new file mode 100644
index 00000000..fc119c82
--- /dev/null
+++ b/modules/core/test/ref/test37.out
@@ -0,0 +1,3 @@
+# testing with conf/test37.conf
+httpd: Syntax error on line 3 of ./conf/test37.conf: macro 'stupid' (defined on line 3 of "./conf/test37.conf"): empty argument #1 name
+# exit: 1
diff --git a/modules/core/test/ref/test38.out b/modules/core/test/ref/test38.out
new file mode 100644
index 00000000..ed3f1bb0
--- /dev/null
+++ b/modules/core/test/ref/test38.out
@@ -0,0 +1,6 @@
+# testing with conf/test38.conf
+[core:warn] it is really a good idea to have mod_macro.c installed. on line 4 of ./conf/test38.conf
+[core:error] it seems you do not have mod perl installed. on line 8 of ./conf/test38.conf
+AH00526: Syntax error on line 8 of ./conf/test38.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test39.out b/modules/core/test/ref/test39.out
new file mode 100644
index 00000000..ea2cede9
--- /dev/null
+++ b/modules/core/test/ref/test39.out
@@ -0,0 +1,7 @@
+# testing with conf/test39.conf
+[core:warn] Thanks for using mod_macro! on line 1 of macro 'modmacro' (defined on line 4 of "./conf/test39.conf") used on line 15 of "./conf/test39.conf"
+[core:warn] macro foo on line 1 of macro 'foo' (defined on line 17 of "./conf/test39.conf") used on line 21 of "./conf/test39.conf"
+[core:error] done! on line 23 of ./conf/test39.conf
+AH00526: Syntax error on line 23 of ./conf/test39.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test40.out b/modules/core/test/ref/test40.out
new file mode 100644
index 00000000..e3c5971a
--- /dev/null
+++ b/modules/core/test/ref/test40.out
@@ -0,0 +1,18 @@
+# testing with conf/test40.conf
+AH00112: Warning: DocumentRoot [/foo/document/root/directory] does not exist
+[core:warn] location /A on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 15 of "./conf/test40.conf"
+[core:warn] location /B on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 16 of "./conf/test40.conf"
+[core:warn] location /C on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 17 of "./conf/test40.conf"
+[core:warn] location /D on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 18 of "./conf/test40.conf"
+[core:warn] location /E on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 19 of "./conf/test40.conf"
+[core:warn] location /G on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 20 of "./conf/test40.conf"
+[core:warn] location /H on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 21 of "./conf/test40.conf"
+[core:warn] location /J on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 22 of "./conf/test40.conf"
+[core:warn] location /K on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 23 of "./conf/test40.conf"
+[core:warn] location /L on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 24 of "./conf/test40.conf"
+[core:warn] location /M on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 25 of "./conf/test40.conf"
+[core:warn] location /N on line 2 of macro 'subdiraccesscontrol' (defined on line 7 of "./conf/test40.conf") used on line 26 of "./conf/test40.conf"
+[core:error] Stop configuration file processing. on line 33 of ./conf/test40.conf
+AH00526: Syntax error on line 33 of ./conf/test40.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test41.out b/modules/core/test/ref/test41.out
new file mode 100644
index 00000000..3bef2853
--- /dev/null
+++ b/modules/core/test/ref/test41.out
@@ -0,0 +1,9 @@
+# testing with conf/test41.conf
+AH00112: Warning: DocumentRoot [/foo/document/root/directory] does not exist
+[core:warn] location /A on line 7 of ./conf/test41.conf
+[core:warn] location /B on line 11 of ./conf/test41.conf
+[core:warn] location /C on line 15 of ./conf/test41.conf
+[core:error] Stop configuration file processing. on line 20 of ./conf/test41.conf
+AH00526: Syntax error on line 20 of ./conf/test41.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test42.out b/modules/core/test/ref/test42.out
new file mode 100644
index 00000000..bab1510b
--- /dev/null
+++ b/modules/core/test/ref/test42.out
@@ -0,0 +1,15 @@
+# testing with conf/test42.conf
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 7 of "./conf/test42.conf": empty argument #1
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 8 of "./conf/test42.conf": empty argument #1
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 9 of "./conf/test42.conf": empty argument #1
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 10 of "./conf/test42.conf": empty argument #1
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 11 of "./conf/test42.conf": empty argument #1
+[core:warn] macro foo on line 1 of macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 7 of "./conf/test42.conf"
+[core:warn] macro foo on line 1 of macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 8 of "./conf/test42.conf"
+[core:warn] macro foo on line 1 of macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 9 of "./conf/test42.conf"
+[core:warn] macro foo on line 1 of macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 10 of "./conf/test42.conf"
+[core:warn] macro foo on line 1 of macro 'foo' (defined on line 3 of "./conf/test42.conf") used on line 11 of "./conf/test42.conf"
+[core:error] done on line 13. on line 13 of ./conf/test42.conf
+AH00526: Syntax error on line 13 of ./conf/test42.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test43.out b/modules/core/test/ref/test43.out
new file mode 100644
index 00000000..04a72d8a
--- /dev/null
+++ b/modules/core/test/ref/test43.out
@@ -0,0 +1,8 @@
+# testing with conf/test43.conf
+[macro:warn] bad cumulated nesting (+1) in macro 'begindir' (defined on line 3 of "./conf/test43.conf")
+[macro:warn] bad (negative) nesting on line 2 of macro 'enddir' (defined on line 8 of "./conf/test43.conf")
+[macro:warn] bad cumulated nesting (-1) in macro 'enddir' (defined on line 8 of "./conf/test43.conf")
+[core:error] ok! on line 29 of ./conf/test43.conf
+AH00526: Syntax error on line 29 of ./conf/test43.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test44.out b/modules/core/test/ref/test44.out
new file mode 100644
index 00000000..e744cda6
--- /dev/null
+++ b/modules/core/test/ref/test44.out
@@ -0,0 +1,5 @@
+# testing with conf/test44.conf
+[core:error] okay. on line 19 of ./conf/test44.conf
+AH00526: Syntax error on line 19 of ./conf/test44.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test45.out b/modules/core/test/ref/test45.out
new file mode 100644
index 00000000..daadaac7
--- /dev/null
+++ b/modules/core/test/ref/test45.out
@@ -0,0 +1,19 @@
+# testing with conf/test45.conf
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '&3' (#3) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '#5' (#5) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '~6' (#6) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '*7' (#7) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '.8' (#8) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name ',9' (#9) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '!a' (#10) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '-b' (#11) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '+c' (#12) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '=d' (#13) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name ':e' (#14) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name ';f' (#15) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf") argument name '?g' (#16) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 3 of "./conf/test45.conf"): empty contents!
+[core:error] done on line 7. on line 7 of ./conf/test45.conf
+AH00526: Syntax error on line 7 of ./conf/test45.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test46.out b/modules/core/test/ref/test46.out
new file mode 100644
index 00000000..8059744e
--- /dev/null
+++ b/modules/core/test/ref/test46.out
@@ -0,0 +1,9 @@
+# testing with conf/test46.conf
+[macro:warn] macro '$i' (defined on line 3 of "./conf/test46.conf") better prefix a macro name with any of '$%@'
+[macro:warn] macro '$i' (defined on line 3 of "./conf/test46.conf"): empty contents!
+[macro:warn] macro 'warnings' (defined on line 7 of "./conf/test46.conf") argument name 'second' (#2) without expected prefix, better prefix argument names with one of '$%@'.
+[macro:warn] macro 'warnings' (defined on line 7 of "./conf/test46.conf"): empty contents!
+[core:error] okay. on line 11 of ./conf/test46.conf
+AH00526: Syntax error on line 11 of ./conf/test46.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test47.out b/modules/core/test/ref/test47.out
new file mode 100644
index 00000000..58fb3cc6
--- /dev/null
+++ b/modules/core/test/ref/test47.out
@@ -0,0 +1,8 @@
+# testing with conf/test47.conf
+[macro:warn] macro 'foo' (defined on line 3 of "./conf/test47.conf"): empty contents!
+[macro:warn] macro 'bla' (defined on line 8 of "./conf/test47.conf"): empty contents!
+[macro:warn] macro 'bof' (defined on line 11 of "./conf/test47.conf"): empty contents!
+[core:error] okay. on line 15 of ./conf/test47.conf
+AH00526: Syntax error on line 15 of ./conf/test47.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test48.out b/modules/core/test/ref/test48.out
new file mode 100644
index 00000000..6ac5e993
--- /dev/null
+++ b/modules/core/test/ref/test48.out
@@ -0,0 +1,20 @@
+# testing with conf/test48.conf
+[core:warn] 1 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 7 of "./conf/test48.conf"
+[core:warn] 12 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 8 of "./conf/test48.conf"
+[core:warn] 123 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 9 of "./conf/test48.conf"
+[core:warn] 1234 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 10 of "./conf/test48.conf"
+[core:warn] 12345 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 11 of "./conf/test48.conf"
+[core:warn] 123456 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 12 of "./conf/test48.conf"
+[core:warn] 1234567 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 13 of "./conf/test48.conf"
+[core:warn] 12345678 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 14 of "./conf/test48.conf"
+[core:warn] 123456789 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 15 of "./conf/test48.conf"
+[core:warn] 1234567890 on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 16 of "./conf/test48.conf"
+[core:warn] 1234567890a on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 17 of "./conf/test48.conf"
+[core:warn] 1234567890ab on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 18 of "./conf/test48.conf"
+[core:warn] 1234567890abc on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 19 of "./conf/test48.conf"
+[core:warn] 1234567890abcd on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 20 of "./conf/test48.conf"
+[core:warn] 1234567890abcde on line 1 of macro 'm' (defined on line 3 of "./conf/test48.conf") used on line 21 of "./conf/test48.conf"
+[core:error] done line 23. on line 23 of ./conf/test48.conf
+AH00526: Syntax error on line 23 of ./conf/test48.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test49.out b/modules/core/test/ref/test49.out
new file mode 100644
index 00000000..5e83e02e
--- /dev/null
+++ b/modules/core/test/ref/test49.out
@@ -0,0 +1,3 @@
+# testing with conf/test49.conf
+httpd: Syntax error on line 2 of ./conf/test49.conf: no macro defined before UndefMacro
+# exit: 1
diff --git a/modules/core/test/ref/test50.out b/modules/core/test/ref/test50.out
new file mode 100644
index 00000000..477e8549
--- /dev/null
+++ b/modules/core/test/ref/test50.out
@@ -0,0 +1,3 @@
+# testing with conf/test50.conf
+httpd: Syntax error on line 5 of ./conf/test50.conf: cannot remove undefined macro 'bla'
+# exit: 1
diff --git a/modules/core/test/ref/test51.out b/modules/core/test/ref/test51.out
new file mode 100644
index 00000000..be9cc17a
--- /dev/null
+++ b/modules/core/test/ref/test51.out
@@ -0,0 +1,3 @@
+# testing with conf/test51.conf
+httpd: Syntax error on line 9 of ./conf/test51.conf: macro 'foo' undefined
+# exit: 1
diff --git a/modules/core/test/ref/test52.out b/modules/core/test/ref/test52.out
new file mode 100644
index 00000000..f41b7d68
--- /dev/null
+++ b/modules/core/test/ref/test52.out
@@ -0,0 +1,6 @@
+# testing with conf/test52.conf
+[core:warn] foo macro contents line 1 on line 1 of macro 'foo' (defined on line 2 of "./conf/test52.conf") used on line 5 of "./conf/test52.conf"
+[core:error] done line 8. on line 8 of ./conf/test52.conf
+AH00526: Syntax error on line 8 of ./conf/test52.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test53.out b/modules/core/test/ref/test53.out
new file mode 100644
index 00000000..2fb3852d
--- /dev/null
+++ b/modules/core/test/ref/test53.out
@@ -0,0 +1,3 @@
+# testing with conf/test53.conf
+httpd: Syntax error on line 2 of ./conf/test53.conf: no macro defined before Use
+# exit: 1
diff --git a/modules/core/test/ref/test54.out b/modules/core/test/ref/test54.out
new file mode 100644
index 00000000..814b491e
--- /dev/null
+++ b/modules/core/test/ref/test54.out
@@ -0,0 +1,6 @@
+# testing with conf/test54.conf
+[macro:warn] macro 'foo' (defined on line 2 of "./conf/test54.conf"): empty contents!
+[core:error] done line 6. on line 6 of ./conf/test54.conf
+AH00526: Syntax error on line 6 of ./conf/test54.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test55.out b/modules/core/test/ref/test55.out
new file mode 100644
index 00000000..c3590f95
--- /dev/null
+++ b/modules/core/test/ref/test55.out
@@ -0,0 +1,8 @@
+# testing with conf/test55.conf
+[core:warn] macro foo(:2) line 1 (file line 9) on line 1 of macro 'foo' (defined on line 2 of "./conf/test55.conf") used on line 9 of "./conf/test55.conf"
+[core:warn] macro bla(:5) line 1 (file line 10) on line 1 of macro 'bla' (defined on line 5 of "./conf/test55.conf") used on line 10 of "./conf/test55.conf"
+[core:warn] macro foo(:2) line 1 (bla line 2) on line 1 of macro 'foo' (defined on line 2 of "./conf/test55.conf") used on line 2 of "macro 'bla' (defined on line 5 of "./conf/test55.conf") used on line 10 of "./conf/test55.conf""
+[core:error] done line 11. on line 11 of ./conf/test55.conf
+AH00526: Syntax error on line 11 of ./conf/test55.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test56.out b/modules/core/test/ref/test56.out
new file mode 100644
index 00000000..f2a0b6e2
--- /dev/null
+++ b/modules/core/test/ref/test56.out
@@ -0,0 +1,12 @@
+# testing with conf/test56.conf
+[macro:warn] bad cumulated nesting (+1) in macro 'open' (defined on line 2 of "./conf/test56.conf")
+[macro:warn] bad (negative) nesting on line 3 of macro 'close' (defined on line 6 of "./conf/test56.conf")
+[macro:warn] bad cumulated nesting (-1) in macro 'close' (defined on line 6 of "./conf/test56.conf")
+[core:warn] Open:2 /tmp on line 2 of macro 'open' (defined on line 2 of "./conf/test56.conf") used on line 12 of "./conf/test56.conf"
+[core:warn] Close:1 on line 1 of macro 'close' (defined on line 6 of "./conf/test56.conf") used on line 13 of "./conf/test56.conf"
+[core:warn] Open:2 /etc on line 2 of macro 'open' (defined on line 2 of "./conf/test56.conf") used on line 15 of "./conf/test56.conf"
+[core:warn] Close:1 on line 1 of macro 'close' (defined on line 6 of "./conf/test56.conf") used on line 16 of "./conf/test56.conf"
+[core:error] done line 18. on line 18 of ./conf/test56.conf
+AH00526: Syntax error on line 18 of ./conf/test56.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test57.out b/modules/core/test/ref/test57.out
new file mode 100644
index 00000000..77a19011
--- /dev/null
+++ b/modules/core/test/ref/test57.out
@@ -0,0 +1,3 @@
+# testing with conf/test57.conf
+httpd: Syntax error on line 2 of ./conf/test57.conf: macro 'foo' (defined on line 2 of "./conf/test57.conf"): empty argument #2 name
+# exit: 1
diff --git a/modules/core/test/ref/test58.out b/modules/core/test/ref/test58.out
new file mode 100644
index 00000000..0ece028d
--- /dev/null
+++ b/modules/core/test/ref/test58.out
@@ -0,0 +1,3 @@
+# testing with conf/test58.conf
+httpd: Syntax error on line 2 of ./conf/test58.conf: <Macro> directive missing closing '>'
+# exit: 1
diff --git a/modules/core/test/ref/test59.out b/modules/core/test/ref/test59.out
new file mode 100644
index 00000000..7895917b
--- /dev/null
+++ b/modules/core/test/ref/test59.out
@@ -0,0 +1,3 @@
+# testing with conf/test59.conf
+httpd: Syntax error on line 2 of ./conf/test59.conf: <Macro macro definition: name not found
+# exit: 1
diff --git a/modules/core/test/ref/test60.out b/modules/core/test/ref/test60.out
new file mode 100644
index 00000000..cac70028
--- /dev/null
+++ b/modules/core/test/ref/test60.out
@@ -0,0 +1,15 @@
+# testing with conf/test60.conf
+[core:warn] macro Foo arg 1: hello on line 1 of macro 'foo' (defined on line 2 of "./conf/test60.conf") used on line 14 of "./conf/test60.conf"
+[core:warn] macro Foo arg 2: world on line 2 of macro 'foo' (defined on line 2 of "./conf/test60.conf") used on line 14 of "./conf/test60.conf"
+[core:warn] Macro Bla arg 1: "hello world" on line 1 of macro 'bla' (defined on line 6 of "./conf/test60.conf") used on line 15 of "./conf/test60.conf"
+[core:warn] Macro Bla arg 2: "thank you" on line 2 of macro 'bla' (defined on line 6 of "./conf/test60.conf") used on line 15 of "./conf/test60.conf"
+[core:warn] macro Foo arg 1: hello world on line 1 of macro 'foo' (defined on line 2 of "./conf/test60.conf") used on line 3 of "macro 'bla' (defined on line 6 of "./conf/test60.conf") used on line 15 of "./conf/test60.conf""
+[core:warn] macro Foo arg 2: second on line 2 of macro 'foo' (defined on line 2 of "./conf/test60.conf") used on line 3 of "macro 'bla' (defined on line 6 of "./conf/test60.conf") used on line 15 of "./conf/test60.conf""
+[core:warn] macro Foo arg 1: first on line 1 of macro 'foo' (defined on line 2 of "./conf/test60.conf") used on line 4 of "macro 'bla' (defined on line 6 of "./conf/test60.conf") used on line 15 of "./conf/test60.conf""
+[core:warn] macro Foo arg 2: thank you on line 2 of macro 'foo' (defined on line 2 of "./conf/test60.conf") used on line 4 of "macro 'bla' (defined on line 6 of "./conf/test60.conf") used on line 15 of "./conf/test60.conf""
+[core:warn] macro Foo arg 1: hello world on line 1 of macro 'foo' (defined on line 2 of "./conf/test60.conf") used on line 5 of "macro 'bla' (defined on line 6 of "./conf/test60.conf") used on line 15 of "./conf/test60.conf""
+[core:warn] macro Foo arg 2: thank you on line 2 of macro 'foo' (defined on line 2 of "./conf/test60.conf") used on line 5 of "macro 'bla' (defined on line 6 of "./conf/test60.conf") used on line 15 of "./conf/test60.conf""
+[core:error] done on line 17. on line 17 of ./conf/test60.conf
+AH00526: Syntax error on line 17 of ./conf/test60.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test61.out b/modules/core/test/ref/test61.out
new file mode 100644
index 00000000..59639c97
--- /dev/null
+++ b/modules/core/test/ref/test61.out
@@ -0,0 +1,9 @@
+# testing with conf/test61.conf
+[core:warn] F4:1 x=line=17 on line 1 of macro 'f4' (defined on line 13 of "./conf/test61.conf") used on line 17 of "./conf/test61.conf"
+[core:warn] F3:1 x=line=17 on line 1 of macro 'f3' (defined on line 9 of "./conf/test61.conf") used on line 2 of "macro 'f4' (defined on line 13 of "./conf/test61.conf") used on line 17 of "./conf/test61.conf""
+[core:warn] F2:1 x=line=17 on line 1 of macro 'f2' (defined on line 5 of "./conf/test61.conf") used on line 2 of "macro 'f3' (defined on line 9 of "./conf/test61.conf") used on line 2 of "macro 'f4' (defined on line 13 of "./conf/test61.conf") used on line 17 of "./conf/test61.conf"""
+[core:warn] F1:1 x=line=17 on line 1 of macro 'f1' (defined on line 2 of "./conf/test61.conf") used on line 2 of "macro 'f2' (defined on line 5 of "./conf/test61.conf") used on line 2 of "macro 'f3' (defined on line 9 of "./conf/test61.conf") used on line 2 of "macro 'f4' (defined on line 13 of "./conf/test61.conf") used on line 17 of "./conf/test61.conf""""
+[core:error] done line 18. on line 18 of ./conf/test61.conf
+AH00526: Syntax error on line 18 of ./conf/test61.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test62.out b/modules/core/test/ref/test62.out
new file mode 100644
index 00000000..a956e7e7
--- /dev/null
+++ b/modules/core/test/ref/test62.out
@@ -0,0 +1,15 @@
+# testing with conf/test62.conf
+[core:warn] Line:1-2 start at 11 on line 1 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 11 of "./conf/test62.conf"
+[core:warn] Line:3-4 stop at 11 on line 2 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 11 of "./conf/test62.conf"
+[core:warn] Line:1-2 start at 12 on line 1 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 13 of "./conf/test62.conf"
+[core:warn] Line:3-4 stop at 13 on line 2 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 13 of "./conf/test62.conf"
+[core:warn] Line:1-2 start at 14 on line 1 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 16 of "./conf/test62.conf"
+[core:warn] Line:3-4 stop at 16 on line 2 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 16 of "./conf/test62.conf"
+[core:warn] Line:1-2 start at 17 on line 1 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 18 of "./conf/test62.conf"
+[core:warn] Line:3-4 stop at 18 on line 2 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 18 of "./conf/test62.conf"
+[core:warn] Line:1-2 start at 19 on line 1 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 23 of "./conf/test62.conf"
+[core:warn] Line:3-4 stop at 23 on line 2 of macro 'line' (defined on line 4 of "./conf/test62.conf") used on line 23 of "./conf/test62.conf"
+[core:error] done line 25. on line 25 of ./conf/test62.conf
+AH00526: Syntax error on line 25 of ./conf/test62.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test63.out b/modules/core/test/ref/test63.out
new file mode 100644
index 00000000..985710dd
--- /dev/null
+++ b/modules/core/test/ref/test63.out
@@ -0,0 +1,10 @@
+# testing with conf/test63.conf
+[core:warn] Foo macro at inc63_.conf:5 on line 1 of macro 'foo' (defined on line 2 of "./conf/inc63_1.conf") used on line 5 of "./conf/inc63_1.conf"
+[core:warn] Foo macro at test63.conf:3 on line 1 of macro 'foo' (defined on line 2 of "./conf/inc63_1.conf") used on line 3 of "./conf/test63.conf"
+[core:warn] Foo macro at inc63_2.conf:2 on line 1 of macro 'foo' (defined on line 2 of "./conf/inc63_1.conf") used on line 2 of "./conf/inc63_2.conf"
+[core:warn] Bla at inc63_2.conf:3 on line 1 of macro 'bla' (defined on line 4 of "./conf/test63.conf") used on line 3 of "./conf/inc63_2.conf"
+[core:warn] Bla at test63.conf:8 on line 1 of macro 'bla' (defined on line 4 of "./conf/test63.conf") used on line 8 of "./conf/test63.conf"
+[core:error] done at line 9. on line 9 of ./conf/test63.conf
+AH00526: Syntax error on line 9 of ./conf/test63.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test64.out b/modules/core/test/ref/test64.out
new file mode 100644
index 00000000..99fec92d
--- /dev/null
+++ b/modules/core/test/ref/test64.out
@@ -0,0 +1,7 @@
+# testing with conf/test64.conf
+[core:warn] on line 2 on line 2 of ./conf/test64.conf
+[core:warn] from line 3 to line 4 on line 4 of ./conf/test64.conf
+[core:error] done on line 5. on line 5 of ./conf/test64.conf
+AH00526: Syntax error on line 5 of ./conf/test64.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test65.out b/modules/core/test/ref/test65.out
new file mode 100644
index 00000000..62882e28
--- /dev/null
+++ b/modules/core/test/ref/test65.out
@@ -0,0 +1,7 @@
+# testing with conf/test65.conf
+[core:warn] Line: on line 6-7 on line 1 of macro 'line' (defined on line 2 of "./conf/test65.conf") used on line 7 of "./conf/test65.conf"
+[core:warn] Line: on line 8-10 on line 1 of macro 'line' (defined on line 2 of "./conf/test65.conf") used on line 10 of "./conf/test65.conf"
+[core:error] done on line 11. on line 11 of ./conf/test65.conf
+AH00526: Syntax error on line 11 of ./conf/test65.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test66.out b/modules/core/test/ref/test66.out
new file mode 100644
index 00000000..db8616a9
--- /dev/null
+++ b/modules/core/test/ref/test66.out
@@ -0,0 +1,7 @@
+# testing with conf/test66.conf
+[core:warn] Foo: x=X y=Y on line 1 of macro 'foo' (defined on line 2 of "./conf/test66.conf") used on line 5 of "./conf/test66.conf"
+[core:warn] Foo: x=$y y=$x on line 1 of macro 'foo' (defined on line 2 of "./conf/test66.conf") used on line 6 of "./conf/test66.conf"
+[core:error] done on line 7. on line 7 of ./conf/test66.conf
+AH00526: Syntax error on line 7 of ./conf/test66.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test67.out b/modules/core/test/ref/test67.out
new file mode 100644
index 00000000..b83f0749
--- /dev/null
+++ b/modules/core/test/ref/test67.out
@@ -0,0 +1,5 @@
+# testing with conf/test67.conf
+[core:error] done at line 1 without LF. on line 1 of ./conf/test67.conf
+AH00526: Syntax error on line 1 of ./conf/test67.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test68.out b/modules/core/test/ref/test68.out
new file mode 100644
index 00000000..0289127b
--- /dev/null
+++ b/modules/core/test/ref/test68.out
@@ -0,0 +1,6 @@
+# testing with conf/test68.conf
+[core:warn] line 2-3 on line 3 of ./conf/test68.conf
+[core:error] done on line 4-5. on line 5 of ./conf/test68.conf
+AH00526: Syntax error on line 5 of ./conf/test68.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/core/test/ref/test69.out b/modules/core/test/ref/test69.out
new file mode 100644
index 00000000..ac0c1dbd
--- /dev/null
+++ b/modules/core/test/ref/test69.out
@@ -0,0 +1,10 @@
+# testing with conf/test69.conf
+[macro:warn] non blank chars found after <Macro closing '>' on line 2 of ./conf/test69.conf: this stuff is ignored...
+[macro:warn] non blank chars found after directive closing on line 4 of ./conf/test69.conf: this stuff is ignored as well...
+[core:warn] Foo on line 1 of macro 'foo' (defined on line 2 of "./conf/test69.conf") used on line 5 of "./conf/test69.conf"
+[core:warn] Bla on line 1 of macro 'bla' (defined on line 6 of "./conf/test69.conf") used on line 9 of "./conf/test69.conf"
+[core:warn] Comments on line 1 of macro 'comments' (defined on line 10 of "./conf/test69.conf") used on line 13 of "./conf/test69.conf"
+[core:error] done on line 14. on line 14 of ./conf/test69.conf
+AH00526: Syntax error on line 14 of ./conf/test69.conf:
+Configuration processing stopped by Error directive
+# exit: 1
diff --git a/modules/dav/fs/dbm.c b/modules/dav/fs/dbm.c
index 53a97e91..821168e8 100644
--- a/modules/dav/fs/dbm.c
+++ b/modules/dav/fs/dbm.c
@@ -191,7 +191,15 @@ void dav_dbm_close(dav_db *db)
dav_error * dav_dbm_fetch(dav_db *db, apr_datum_t key, apr_datum_t *pvalue)
{
- apr_status_t status = apr_dbm_fetch(db->file, key, pvalue);
+ apr_status_t status;
+
+ if (!key.dptr) {
+ /* no key could be created (namespace not known) => no value */
+ memset(pvalue, 0, sizeof(*pvalue));
+ status = APR_SUCCESS;
+ } else {
+ status = apr_dbm_fetch(db->file, key, pvalue);
+ }
return dav_fs_dbm_error(db, NULL, status);
}
@@ -731,6 +739,10 @@ static dav_error * dav_propdb_get_rollback(dav_db *db,
static dav_error * dav_propdb_apply_rollback(dav_db *db,
dav_deadprop_rollback *rollback)
{
+ if (!rollback) {
+ return NULL; /* no rollback, nothing to do */
+ }
+
if (rollback->value.dptr == NULL) {
/* don't fail if the thing isn't really there. */
(void) dav_dbm_delete(db, rollback->key);
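
A minimal caller-side sketch (hypothetical helper, not part of this patch) of what the new guard in dav_dbm_fetch() buys: a key that could not be built because its namespace is unknown now comes back as an all-zero datum under APR_SUCCESS, so callers can keep treating a NULL dptr as "no stored value" without a NULL key ever reaching apr_dbm_fetch().

    #include "apr_dbm.h"   /* apr_datum_t */
    #include "mod_dav.h"   /* dav_db, dav_error */

    /* In the tree this prototype comes from the dav/fs private header;
     * it is repeated here only to keep the sketch self-contained. */
    dav_error *dav_dbm_fetch(dav_db *db, apr_datum_t key, apr_datum_t *pvalue);

    /* Hypothetical helper: distinguish "no value stored" from a DBM failure. */
    static dav_error *fetch_optional(dav_db *db, apr_datum_t key,
                                     apr_datum_t *pvalue, int *found)
    {
        dav_error *err = dav_dbm_fetch(db, key, pvalue);

        if (err != NULL) {
            return err;                    /* genuine DBM error */
        }

        /* key.dptr == NULL (namespace not known) now yields the same
         * empty datum as an ordinary "key not found" lookup. */
        *found = (pvalue->dptr != NULL);
        return NULL;
    }
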
diff --git a/modules/dav/main/mod_dav.c b/modules/dav/main/mod_dav.c
index b5059175..9135cd96 100644
--- a/modules/dav/main/mod_dav.c
+++ b/modules/dav/main/mod_dav.c
@@ -611,7 +611,9 @@ static int dav_created(request_rec *r, const char *locn, const char *what,
const char *body;
if (locn == NULL) {
- locn = r->uri;
+ locn = r->unparsed_uri;
+ } else {
+ locn = ap_escape_uri(r->pool, locn);
}
/* did the target resource already exist? */
@@ -707,6 +709,12 @@ static dav_error *dav_get_resource(request_rec *r, int label_allowed,
conf = ap_get_module_config(r->per_dir_config, &dav_module);
/* assert: conf->provider != NULL */
+ if (conf->provider == NULL) {
+ return dav_new_error(r->pool, HTTP_METHOD_NOT_ALLOWED, 0, 0,
+ apr_psprintf(r->pool,
+ "DAV not enabled for %s",
+ ap_escape_html(r->pool, r->uri)));
+ }
/* resolve the resource */
err = (*conf->provider->repos->get_resource)(r, conf->dir,
@@ -995,8 +1003,8 @@ static int dav_method_put(request_rec *r)
else {
/* XXX: should this actually be HTTP_BAD_REQUEST? */
http_err = HTTP_INTERNAL_SERVER_ERROR;
- msg = apr_psprintf(r->pool, "Could not get next bucket "
- "brigade (URI: %s)", msg);
+ msg = apr_psprintf(r->pool, "An error occurred while reading"
+ " the request body (URI: %s)", msg);
}
err = dav_new_error(r->pool, http_err, 0, rc, msg);
break;
@@ -1018,18 +1026,19 @@ static int dav_method_put(request_rec *r)
continue;
}
- rc = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
- if (rc != APR_SUCCESS) {
- err = dav_new_error(r->pool, HTTP_BAD_REQUEST, 0, rc,
- apr_psprintf(r->pool,
- "An error occurred while reading"
- " the request body (URI: %s)",
- ap_escape_html(r->pool, r->uri)));
- break;
- }
-
if (err == NULL) {
/* write whatever we read, until we see an error */
+ rc = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (rc != APR_SUCCESS) {
+ err = dav_new_error(r->pool, HTTP_BAD_REQUEST, 0, rc,
+ apr_psprintf(r->pool,
+ "An error occurred while"
+ " reading the request body"
+ " from the bucket (URI: %s)",
+ ap_escape_html(r->pool, r->uri)));
+ break;
+ }
+
err = (*resource->hooks->write_stream)(stream, data, len);
}
}
@@ -1041,10 +1050,7 @@ static int dav_method_put(request_rec *r)
err2 = (*resource->hooks->close_stream)(stream,
err == NULL /* commit */);
- if (err2 != NULL && err == NULL) {
- /* no error during the write, but we hit one at close. use it. */
- err = err2;
- }
+ err = dav_join_error(err, err2);
}
/*
@@ -1062,6 +1068,7 @@ static int dav_method_put(request_rec *r)
/* check for errors now */
if (err != NULL) {
+ err = dav_join_error(err, err2); /* don't forget err2 */
return dav_handle_err(r, err, NULL);
}
@@ -2683,11 +2690,6 @@ static int dav_method_copymove(request_rec *r, int is_move)
"Destination URI had an error.");
}
- if (dav_get_provider(lookup.rnew) == NULL) {
- return dav_error_response(r, HTTP_METHOD_NOT_ALLOWED,
- "DAV not enabled for Destination URI.");
- }
-
/* Resolve destination resource */
err = dav_get_resource(lookup.rnew, 0 /* label_allowed */,
0 /* use_checked_in */, &resnew);
@@ -2749,10 +2751,10 @@ static int dav_method_copymove(request_rec *r, int is_move)
}
/*
- * Check If-Headers and existing locks for each resource in the source
- * if we are performing a MOVE. We will return a 424 response with a
- * DAV:multistatus body. The multistatus responses will contain the
- * information about any resource that fails the validation.
+ * Check If-Headers and existing locks for each resource in the source.
+ * We will return a 424 response with a DAV:multistatus body.
+ * The multistatus responses will contain the information about any
+ * resource that fails the validation.
*
* We check the parent resource, too, since this is a MOVE. Moving the
* resource effectively removes it from the parent collection, so we
@@ -2761,17 +2763,17 @@ static int dav_method_copymove(request_rec *r, int is_move)
* If a problem occurs with the Request-URI itself, then a plain error
* (rather than a multistatus) will be returned.
*/
- if (is_move
- && (err = dav_validate_request(r, resource, depth, NULL,
- &multi_response,
- DAV_VALIDATE_PARENT
- | DAV_VALIDATE_USE_424,
- NULL)) != NULL) {
+ if ((err = dav_validate_request(r, resource, depth, NULL,
+ &multi_response,
+ DAV_VALIDATE_PARENT
+ | DAV_VALIDATE_USE_424,
+ NULL)) != NULL) {
err = dav_push_error(r->pool, err->status, 0,
apr_psprintf(r->pool,
- "Could not MOVE %s due to a failed "
+ "Could not %s %s due to a failed "
"precondition on the source "
"(e.g. locks).",
+ is_move ? "MOVE" : "COPY",
ap_escape_html(r->pool, r->uri)),
err);
return dav_handle_err(r, err, multi_response);
diff --git a/modules/dav/main/mod_dav.h b/modules/dav/main/mod_dav.h
index 768638c3..7b91b63c 100644
--- a/modules/dav/main/mod_dav.h
+++ b/modules/dav/main/mod_dav.h
@@ -169,6 +169,21 @@ DAV_DECLARE(dav_error*) dav_push_error(apr_pool_t *p, int status, int error_id,
const char *desc, dav_error *prev);
+/*
+** Join two errors together.
+**
+** This function is used to add a new error stack onto an existing error so
+** that subsequent errors can be reported after the first error. It returns
+** the correct error stack to use, so the caller can call it unconditionally
+** without first checking whether dest or src is NULL.
+**
+** <dest> is the error stack that the error will be added to.
+**
+** <src> is the error stack that will be appended.
+*/
+DAV_DECLARE(dav_error*) dav_join_error(dav_error* dest, dav_error* src);
+
+
/* error ID values... */
/* IF: header errors */
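The new dav_join_error() is what lets callers in mod_dav.c write err = dav_join_error(err, err2) unconditionally: either argument may be NULL, and the source stack is appended to the end of the destination stack. The same NULL-tolerant join over a plain prev-linked list, as a standalone sketch (struct err is illustrative, not the dav_error type):

    #include <stdio.h>

    struct err { const char *msg; struct err *prev; };

    /* Append src to the end of dest's prev-chain; tolerate NULL on either side. */
    static struct err *join_error(struct err *dest, struct err *src)
    {
        struct err *curr = dest;

        if (src == NULL)
            return dest;
        if (curr == NULL)
            return src;
        while (curr->prev != NULL)
            curr = curr->prev;
        curr->prev = src;
        return dest;
    }

    int main(void)
    {
        struct err close_err = { "close failed", NULL };
        struct err *err = NULL;

        err = join_error(err, &close_err);   /* no NULL checks needed at the call site */
        for (; err != NULL; err = err->prev)
            printf("%s\n", err->msg);
        return 0;
    }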
diff --git a/modules/dav/main/props.c b/modules/dav/main/props.c
index d87dd611..e5eaef16 100644
--- a/modules/dav/main/props.c
+++ b/modules/dav/main/props.c
@@ -594,13 +594,14 @@ DAV_DECLARE(dav_get_props_result) dav_get_allprops(dav_propdb *propdb,
if (propdb->db != NULL) {
dav_xmlns_info *xi = dav_xmlns_create(propdb->p);
dav_prop_name name;
+ dav_error *err;
/* define (up front) any namespaces the db might need */
(void) (*db_hooks->define_namespaces)(propdb->db, xi);
/* get the first property name, beginning the scan */
- (void) (*db_hooks->first_name)(propdb->db, &name);
- while (name.ns != NULL) {
+ err = (*db_hooks->first_name)(propdb->db, &name);
+ while (!err && name.ns) {
/*
** We also look for <DAV:getcontenttype> and
@@ -619,7 +620,6 @@ DAV_DECLARE(dav_get_props_result) dav_get_allprops(dav_propdb *propdb,
}
if (what == DAV_PROP_INSERT_VALUE) {
- dav_error *err;
int found;
if ((err = (*db_hooks->output_value)(propdb->db, &name,
@@ -638,7 +638,7 @@ DAV_DECLARE(dav_get_props_result) dav_get_allprops(dav_propdb *propdb,
}
next_key:
- (void) (*db_hooks->next_name)(propdb->db, &name);
+ err = (*db_hooks->next_name)(propdb->db, &name);
}
/* all namespaces have been entered into xi. generate them into
diff --git a/modules/dav/main/util.c b/modules/dav/main/util.c
index aa085841..ab42af02 100644
--- a/modules/dav/main/util.c
+++ b/modules/dav/main/util.c
@@ -77,6 +77,30 @@ DAV_DECLARE(dav_error*) dav_push_error(apr_pool_t *p, int status,
return err;
}
+DAV_DECLARE(dav_error*) dav_join_error(dav_error *dest, dav_error *src)
+{
+ dav_error *curr = dest;
+
+    /* src error doesn't exist, so there is nothing to join; just return dest */
+ if (src == NULL) {
+ return dest;
+ }
+
+    /* dest error doesn't exist, so there is nothing to join; just return src */
+ if (curr == NULL) {
+ return src;
+ }
+
+ /* find last error in dest stack */
+ while (curr->prev != NULL) {
+ curr = curr->prev;
+ }
+
+ /* add the src error onto end of dest stack and return it */
+ curr->prev = src;
+ return dest;
+}
+
DAV_DECLARE(void) dav_check_bufsize(apr_pool_t * p, dav_buffer *pbuf,
apr_size_t extra_needed)
{
@@ -635,9 +659,19 @@ static dav_error * dav_process_if_header(request_rec *r, dav_if_header **p_ih)
/* clean up the URI a bit */
ap_getparents(parsed_uri.path);
+
+ /* the resources we will compare to have unencoded paths */
+ if (ap_unescape_url(parsed_uri.path) != OK) {
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_TAGGED, rv,
+ "Invalid percent encoded URI in "
+ "tagged If-header.");
+ }
+
uri_len = strlen(parsed_uri.path);
- if (uri_len > 1 && parsed_uri.path[uri_len - 1] == '/')
+ if (uri_len > 1 && parsed_uri.path[uri_len - 1] == '/') {
parsed_uri.path[--uri_len] = '\0';
+ }
uri = parsed_uri.path;
list_type = tagged;
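The If-header change above decodes the tagged state URI before it is compared against resource paths, and a malformed percent escape is now rejected with 400 instead of being compared verbatim. A simplified stand-in for that validation step (ap_unescape_url() applies additional rules beyond this):

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Decode %XX escapes in place; return -1 on a malformed escape. */
    static int unescape(char *s)
    {
        char *out = s;
        while (*s) {
            if (*s == '%') {
                if (!isxdigit((unsigned char)s[1]) || !isxdigit((unsigned char)s[2]))
                    return -1;
                char hex[3] = { s[1], s[2], '\0' };
                *out++ = (char)strtol(hex, NULL, 16);
                s += 3;
            }
            else {
                *out++ = *s++;
            }
        }
        *out = '\0';
        return 0;
    }

    int main(void)
    {
        char ok[] = "/docs/a%20b", bad[] = "/docs/a%2";
        printf("%d \"%s\"\n", unescape(ok), ok);   /* 0 "/docs/a b" */
        printf("%d\n", unescape(bad));             /* -1: reject with 400 */
        return 0;
    }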
diff --git a/modules/filters/mod_charset_lite.c b/modules/filters/mod_charset_lite.c
index 39ab18fc..2d3d1439 100644
--- a/modules/filters/mod_charset_lite.c
+++ b/modules/filters/mod_charset_lite.c
@@ -474,7 +474,7 @@ static void log_xlate_error(ap_filter_t *f, apr_status_t rv)
charset_filter_ctx_t *ctx = f->ctx;
const char *msg;
char msgbuf[100];
- int cur;
+ int len;
switch(ctx->ees) {
case EES_LIMIT:
@@ -492,12 +492,14 @@ static void log_xlate_error(ap_filter_t *f, apr_status_t rv)
case EES_INCOMPLETE_CHAR:
rv = 0;
strcpy(msgbuf, APLOGNO(02196) "xlate filter - incomplete char at end of input - ");
- cur = 0;
- while ((apr_size_t)cur < ctx->saved) {
- apr_snprintf(msgbuf + strlen(msgbuf), sizeof(msgbuf) - strlen(msgbuf),
- "%02X", (unsigned)ctx->buf[cur]);
- ++cur;
- }
+ len = ctx->saved;
+
+            /* Make sure we never convert more bytes than would fit in the
+             * remainder of the destination buffer, including the terminating NUL */
+ if (len > (sizeof(msgbuf) - strlen(msgbuf) - 1) / 2)
+ len = (sizeof(msgbuf) - strlen(msgbuf) - 1) / 2;
+
+ ap_bin2hex(ctx->buf, len, msgbuf + strlen(msgbuf));
msg = msgbuf;
break;
case EES_DOWNSTREAM:
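The clamp added above bounds the hex dump by the space left in msgbuf: every input byte becomes two hex digits, so at most (space - 1) / 2 bytes can be emitted once room for the trailing NUL is reserved. The same sizing rule in a standalone form (ap_bin2hex is replaced by a plain loop here):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char saved[] = { 0xC3, 0xA9, 0xF0, 0x9F };  /* leftover partial char */
        char msgbuf[24] = "incomplete char: ";
        size_t room = sizeof(msgbuf) - strlen(msgbuf) - 1;   /* keep the NUL */
        size_t len = sizeof(saved);

        if (len > room / 2)              /* two hex digits per input byte */
            len = room / 2;

        for (size_t i = 0; i < len; i++)
            sprintf(msgbuf + strlen(msgbuf), "%02X", saved[i]);

        printf("%s\n", msgbuf);          /* only as many bytes as fit */
        return 0;
    }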
diff --git a/modules/filters/mod_deflate.c b/modules/filters/mod_deflate.c
index 48d37b13..79f6f8d1 100644
--- a/modules/filters/mod_deflate.c
+++ b/modules/filters/mod_deflate.c
@@ -304,8 +304,9 @@ typedef struct deflate_ctx_t
int (*libz_end_func)(z_streamp);
unsigned char *validation_buffer;
apr_size_t validation_buffer_length;
- int inflate_init;
- int filter_init;
+ unsigned int inflate_init:1;
+ unsigned int filter_init:1;
+ unsigned int done:1;
} deflate_ctx;
/* Number of validation bytes (CRC and length) after the compressed data */
@@ -942,6 +943,13 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
return rv;
}
+ /* zero length body? step aside */
+ bkt = APR_BRIGADE_FIRST(ctx->bb);
+ if (APR_BUCKET_IS_EOS(bkt)) {
+ ap_remove_input_filter(f);
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
+ }
+
apr_table_unset(r->headers_in, "Content-Length");
apr_table_unset(r->headers_in, "Content-MD5");
@@ -1002,12 +1010,19 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
const char *data;
apr_size_t len;
- /* If we actually see the EOS, that means we screwed up! */
if (APR_BUCKET_IS_EOS(bkt)) {
- inflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01390)
- "Encountered EOS bucket in inflate filter (bug?)");
- return APR_EGENERAL;
+ if (!ctx->done) {
+ inflateEnd(&ctx->stream);
+            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02481)
+                          "Encountered premature end-of-stream while inflating");
+ return APR_EGENERAL;
+ }
+
+ /* Move everything to the returning brigade. */
+ APR_BUCKET_REMOVE(bkt);
+ APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, bkt);
+ ap_remove_input_filter(f);
+ break;
}
if (APR_BUCKET_IS_FLUSH(bkt)) {
@@ -1036,6 +1051,13 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
break;
}
+ /* sanity check - data after completed compressed body and before eos? */
+ if (ctx->done) {
+        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02482)
+                      "Encountered extra data after compressed data");
+ return APR_EGENERAL;
+ }
+
/* read */
apr_bucket_read(bkt, &data, &len, APR_BLOCK_READ);
@@ -1073,7 +1095,7 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
}
}
if (zRC == Z_STREAM_END) {
- apr_bucket *tmp_heap, *eos;
+ apr_bucket *tmp_heap;
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01393)
"Zlib: Inflated %ld to %ld : URL %s",
@@ -1120,9 +1142,7 @@ static apr_status_t deflate_in_filter(ap_filter_t *f,
inflateEnd(&ctx->stream);
- eos = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->proc_bb, eos);
- break;
+ ctx->done = 1;
}
}
@@ -1356,7 +1376,8 @@ static apr_status_t inflate_out_filter(ap_filter_t *f,
apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
/* first bucket contains zlib header */
- if (!ctx->inflate_init++) {
+ if (!ctx->inflate_init) {
+ ctx->inflate_init = 1;
if (len < 10) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01403)
"Insufficient data for inflate");
diff --git a/modules/filters/mod_ext_filter.c b/modules/filters/mod_ext_filter.c
index 658c121a..bd72526d 100644
--- a/modules/filters/mod_ext_filter.c
+++ b/modules/filters/mod_ext_filter.c
@@ -396,7 +396,6 @@ static void child_errfn(apr_pool_t *pool, apr_status_t err, const char *descript
request_rec *r;
void *vr;
apr_file_t *stderr_log;
- char errbuf[200];
char time_str[APR_CTIME_LEN];
apr_pool_userdata_get(&vr, ERRFN_USERDATA_KEY, pool);
@@ -404,11 +403,11 @@ static void child_errfn(apr_pool_t *pool, apr_status_t err, const char *descript
apr_file_open_stderr(&stderr_log, pool);
ap_recent_ctime(time_str, apr_time_now());
apr_file_printf(stderr_log,
- "[%s] [client %s] mod_ext_filter (%d)%s: %s\n",
+ "[%s] [client %s] mod_ext_filter (%d)%pm: %s\n",
time_str,
r->useragent_ip,
err,
- apr_strerror(err, errbuf, sizeof(errbuf)),
+ &err,
description);
}
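The errbuf/apr_strerror() pair is replaced with APR's %pm conversion, which takes a pointer to an apr_status_t and expands to the matching error string inside apr_file_printf()/apr_psprintf(). A small sketch of the conversion on its own (build against the APR headers and library):

    #include <stdio.h>
    #include "apr_general.h"
    #include "apr_pools.h"
    #include "apr_strings.h"
    #include "apr_errno.h"

    int main(void)
    {
        apr_pool_t *p;
        apr_status_t rv = APR_ENOENT;

        apr_initialize();
        apr_pool_create(&p, NULL);

        /* %pm consumes &rv and prints the error text, so no local buffer
         * and no explicit apr_strerror() call are needed at the call site. */
        printf("%s\n", apr_psprintf(p, "(%d)%pm", rv, &rv));

        apr_pool_destroy(p);
        apr_terminate();
        return 0;
    }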
diff --git a/modules/filters/mod_include.c b/modules/filters/mod_include.c
index 8b9c4c77..af90db67 100644
--- a/modules/filters/mod_include.c
+++ b/modules/filters/mod_include.c
@@ -2411,7 +2411,10 @@ static apr_status_t handle_elif(include_ctx_t *ctx, ap_filter_t *f,
return APR_SUCCESS;
}
- expr_ret = parse_expr(ctx, expr, &was_error);
+ if (ctx->intern->legacy_expr)
+ expr_ret = parse_expr(ctx, expr, &was_error);
+ else
+ expr_ret = parse_ap_expr(ctx, expr, &was_error);
if (was_error) {
SSI_CREATE_ERROR_BUCKET(ctx, f, bb);
diff --git a/modules/filters/mod_proxy_html.c b/modules/filters/mod_proxy_html.c
index 6cbe87a9..3e5170d1 100644
--- a/modules/filters/mod_proxy_html.c
+++ b/modules/filters/mod_proxy_html.c
@@ -668,7 +668,7 @@ static meta *metafix(request_rec *r, const char *buf)
if (p != NULL) {
while (*p) {
p += 7;
- while (*p && apr_isspace(*p))
+ while (apr_isspace(*p))
++p;
if (*p != '=')
continue;
@@ -1142,11 +1142,11 @@ static const char *set_flags(cmd_parms *cmd, void *CFG, const char *arg)
{
proxy_html_conf *cfg = CFG;
if (arg && *arg) {
- if (!strcmp(arg, "lowercase"))
+ if (!strcasecmp(arg, "lowercase"))
cfg->flags |= NORM_LC;
- else if (!strcmp(arg, "dospath"))
+ else if (!strcasecmp(arg, "dospath"))
cfg->flags |= NORM_MSSLASH;
- else if (!strcmp(arg, "reset"))
+ else if (!strcasecmp(arg, "reset"))
cfg->flags |= NORM_RESET;
}
return NULL;
diff --git a/modules/filters/mod_ratelimit.c b/modules/filters/mod_ratelimit.c
index 028de361..939ab8e9 100644
--- a/modules/filters/mod_ratelimit.c
+++ b/modules/filters/mod_ratelimit.c
@@ -74,6 +74,7 @@ rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *input_bb)
if (ctx == NULL) {
const char *rl = NULL;
+ int ratelimit;
/* no subrequests. */
if (f->r->main != NULL) {
@@ -87,22 +88,21 @@ rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *input_bb)
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
-
- /* first run, init stuff */
- ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t));
- f->ctx = ctx;
- ctx->speed = 0;
- ctx->state = RATE_LIMIT;
-
+
/* rl is in kilo bytes / second */
- ctx->speed = atoi(rl) * 1024;
-
- if (ctx->speed == 0) {
+ ratelimit = atoi(rl) * 1024;
+ if (ratelimit <= 0) {
/* remove ourselves */
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
+ /* first run, init stuff */
+ ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t));
+ f->ctx = ctx;
+ ctx->state = RATE_LIMIT;
+ ctx->speed = ratelimit;
+
/* calculate how many bytes / interval we want to send */
/* speed is bytes / second, so, how many (speed / 1000 % interval) */
ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS));
@@ -187,7 +187,7 @@ rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *input_bb)
rv = apr_brigade_partition(bb, ctx->chunk_size, &stop_point);
if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
ctx->state = RATE_ERROR;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01456)
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01456)
"rl: partition failed.");
break;
}
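Beyond validating the rate before any per-request state is allocated, the chunk size falls straight out of the configured rate: the environment value is KiB/s, and the filter sends speed / (1000 / RATE_INTERVAL_MS) bytes per interval. A worked example, assuming the usual 200ms interval (check RATE_INTERVAL_MS in mod_ratelimit.c for the real constant):

    #include <stdio.h>
    #include <stdlib.h>

    #define RATE_INTERVAL_MS 200      /* assumed value of the mod_ratelimit constant */

    int main(void)
    {
        const char *rl = "400";              /* e.g. rate-limit env var set to 400 */
        int ratelimit = atoi(rl) * 1024;     /* KiB/s -> bytes/s */

        if (ratelimit <= 0) {                /* zero or garbage: the filter removes itself */
            printf("rate limiting disabled\n");
            return 0;
        }

        /* bytes per interval = bytes per second / intervals per second */
        int chunk_size = ratelimit / (1000 / RATE_INTERVAL_MS);
        printf("speed=%d B/s, chunk=%d B every %d ms\n",
               ratelimit, chunk_size, RATE_INTERVAL_MS);   /* 409600 B/s, 81920 B */
        return 0;
    }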
diff --git a/modules/filters/regexp.h b/modules/filters/regexp.h
index 1e5a6269..6af89120 100644
--- a/modules/filters/regexp.h
+++ b/modules/filters/regexp.h
@@ -69,7 +69,8 @@ typedef struct _sed_comp_args {
extern char *sed_compile(sed_commands_t *commands, sed_comp_args *compargs,
char *ep, char *endbuf, int seof);
-extern void command_errf(sed_commands_t *commands, const char *fmt, ...);
+extern void command_errf(sed_commands_t *commands, const char *fmt, ...)
+ __attribute__((format(printf,2,3)));
#define SEDERR_CGMES "command garbled: %s"
#define SEDERR_SMMES "Space missing before filename: %s"
diff --git a/modules/filters/sed0.c b/modules/filters/sed0.c
index 8c32baf6..ddc4bfed 100644
--- a/modules/filters/sed0.c
+++ b/modules/filters/sed0.c
@@ -275,7 +275,7 @@ comploop:
}
if(p > &commands->respace[RESIZE-1]) {
- command_errf(commands, SEDERR_TMMES);
+ command_errf(commands, SEDERR_TMMES, commands->linebuf);
return -1;
}
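The format attribute added to command_errf() in regexp.h lets GCC/Clang check the variable arguments against the format string, which is exactly the class of bug fixed here: SEDERR_TMMES contains a %s but the old call passed no argument. A standalone sketch; with -Wformat the call shown in the comment would be flagged at compile time:

    #include <stdarg.h>
    #include <stdio.h>

    #define SEDERR_TMMES "too much command text: %s"

    /* argument 2 is the printf-style format, variable arguments start at 3 */
    static void command_errf(void *commands, const char *fmt, ...)
        __attribute__((format(printf, 2, 3)));

    static void command_errf(void *commands, const char *fmt, ...)
    {
        va_list ap;
        (void)commands;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        fputc('\n', stderr);
        va_end(ap);
    }

    int main(void)
    {
        /* command_errf(NULL, SEDERR_TMMES);          <- -Wformat: '%s' but no argument */
        command_errf(NULL, SEDERR_TMMES, "s/a/b/g");  /* correct, matches the fix above */
        return 0;
    }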
diff --git a/modules/generators/mod_autoindex.c b/modules/generators/mod_autoindex.c
index 78a774ef..3d36c77b 100644
--- a/modules/generators/mod_autoindex.c
+++ b/modules/generators/mod_autoindex.c
@@ -1752,10 +1752,10 @@ static void output_directories(struct ent **ar, int n,
}
if (!(autoindex_opts & SUPPRESS_LAST_MOD)) {
if (ar[x]->lm != -1) {
- char time_str[MAX_STRING_LEN];
+ char time_str[32];
apr_time_exp_t ts;
apr_time_exp_lt(&ts, ar[x]->lm);
- apr_strftime(time_str, &rv, MAX_STRING_LEN,
+ apr_strftime(time_str, &rv, sizeof(time_str),
"%Y-%m-%d %H:%M ",
&ts);
ap_rvputs(r, "</td><td", (d->style_sheet != NULL) ? " class=\"indexcollastmod\">" : " align=\"right\">",time_str, NULL);
@@ -1840,10 +1840,10 @@ static void output_directories(struct ent **ar, int n,
ap_rputs(" ", r);
if (!(autoindex_opts & SUPPRESS_LAST_MOD)) {
if (ar[x]->lm != -1) {
- char time_str[MAX_STRING_LEN];
+ char time_str[32];
apr_time_exp_t ts;
apr_time_exp_lt(&ts, ar[x]->lm);
- apr_strftime(time_str, &rv, MAX_STRING_LEN,
+ apr_strftime(time_str, &rv, sizeof(time_str),
"%Y-%m-%d %H:%M ", &ts);
ap_rputs(time_str, r);
}
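The "%Y-%m-%d %H:%M " expansion is fixed-width (17 bytes including the trailing space), so the MAX_STRING_LEN-sized stack buffer was pure waste and 32 bytes leaves ample slack. A quick check with plain strftime():

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        char time_str[32];
        time_t now = time(NULL);

        size_t n = strftime(time_str, sizeof(time_str), "%Y-%m-%d %H:%M ",
                            localtime(&now));
        printf("\"%s\" uses %zu of %zu bytes\n", time_str, n, sizeof(time_str));
        return 0;
    }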
diff --git a/modules/generators/mod_cgi.c b/modules/generators/mod_cgi.c
index 5a0cb382..7808262f 100644
--- a/modules/generators/mod_cgi.c
+++ b/modules/generators/mod_cgi.c
@@ -354,16 +354,15 @@ static void cgi_child_errfn(apr_pool_t *pool, apr_status_t err,
const char *description)
{
apr_file_t *stderr_log;
- char errbuf[200];
apr_file_open_stderr(&stderr_log, pool);
/* Escape the logged string because it may be something that
* came in over the network.
*/
apr_file_printf(stderr_log,
- "(%d)%s: %s\n",
+ "(%d)%pm: %s\n",
err,
- apr_strerror(err, errbuf, sizeof(errbuf)),
+ &err,
#ifndef AP_UNSAFE_ERROR_LOG_UNESCAPED
ap_escape_logitem(pool,
#endif
diff --git a/modules/generators/mod_status.c b/modules/generators/mod_status.c
index 0237f1d7..fe832b32 100644
--- a/modules/generators/mod_status.c
+++ b/modules/generators/mod_status.c
@@ -405,6 +405,8 @@ static int status_handler(request_rec *r)
")</h1>\n\n", NULL);
ap_rvputs(r, "<dl><dt>Server Version: ",
ap_get_server_description(), "</dt>\n", NULL);
+ ap_rvputs(r, "<dt>Server MPM: ",
+ ap_show_mpm(), "</dt>\n", NULL);
ap_rvputs(r, "<dt>Server Built: ",
ap_get_server_built(), "\n</dt></dl><hr /><dl>\n", NULL);
ap_rvputs(r, "<dt>Current Time: ",
@@ -619,7 +621,6 @@ static int status_handler(request_rec *r)
}
if (ap_extended_status && !short_report) {
- apr_table_t *vhosts = apr_table_make(r->pool, 10);
if (no_table_report)
ap_rputs("<hr /><h2>Server Details</h2>\n\n", r);
else
@@ -636,10 +637,6 @@ static int status_handler(request_rec *r)
for (i = 0; i < server_limit; ++i) {
for (j = 0; j < thread_limit; ++j) {
- char *escvhost;
- long last_used;
- const char *vlast;
-
ws_record = ap_get_scoreboard_worker_from_indexes(i, j);
if (ws_record->access_count == 0 &&
@@ -659,22 +656,6 @@ static int status_handler(request_rec *r)
if (req_time < 0L)
req_time = 0L;
- escvhost = ap_escape_html(r->pool, ws_record->vhost);
- last_used = (long)apr_time_sec(nowtime - ws_record->last_used);
- if (escvhost && *escvhost) {
- if ((vlast = apr_table_get(vhosts, escvhost)) != NULL) {
- long temp = atol(vlast);
- if (last_used < temp) {
- apr_table_setn(vhosts, apr_pstrdup(r->pool, escvhost),
- apr_psprintf(r->pool, "%ld", last_used));
- }
- }
- else {
- apr_table_setn(vhosts, apr_pstrdup(r->pool, escvhost),
- apr_psprintf(r->pool, "%ld", last_used));
- }
- }
-
lres = ws_record->access_count;
my_lres = ws_record->my_access_count;
conn_lres = ws_record->conn_count;
@@ -754,7 +735,8 @@ static int status_handler(request_rec *r)
ws_record->times.tms_cutime / tick,
ws_record->times.tms_cstime / tick,
#endif
- last_used,
+ (long)apr_time_sec(nowtime -
+ ws_record->last_used),
(long) req_time);
format_byte_out(r, conn_bytes);
@@ -770,7 +752,8 @@ static int status_handler(request_rec *r)
ap_escape_html(r->pool,
ap_escape_logitem(r->pool,
ws_record->request)),
- escvhost);
+ ap_escape_html(r->pool,
+ ws_record->vhost));
}
else { /* !no_table_report */
if (ws_record->status == SERVER_DEAD)
@@ -839,7 +822,8 @@ static int status_handler(request_rec *r)
ws_record->times.tms_cutime +
ws_record->times.tms_cstime) / tick,
#endif
- last_used,
+ (long)apr_time_sec(nowtime -
+ ws_record->last_used),
(long)req_time);
ap_rprintf(r, "</td><td>%-1.1f</td><td>%-2.2f</td><td>%-2.2f\n",
@@ -850,7 +834,8 @@ static int status_handler(request_rec *r)
"<td nowrap>%s</td></tr>\n\n",
ap_escape_html(r->pool,
ws_record->client),
- escvhost,
+ ap_escape_html(r->pool,
+ ws_record->vhost),
ap_escape_html(r->pool,
ap_escape_logitem(r->pool,
ws_record->request)));
@@ -859,10 +844,6 @@ static int status_handler(request_rec *r)
} /* for (i...) */
if (!no_table_report) {
- int i;
- const apr_array_header_t *arr = apr_table_elts(vhosts);
- const apr_table_entry_t *elts = (const apr_table_entry_t *)arr->elts;
-
ap_rputs("</table>\n \
<hr /> \
<table>\n \
@@ -881,13 +862,6 @@ static int status_handler(request_rec *r)
<tr><th>Child</th><td>Megabytes transferred this child</td></tr>\n \
<tr><th>Slot</th><td>Total megabytes transferred this slot</td></tr>\n \
</table>\n", r);
- ap_rputs("<hr />\n<table>\n\
-<tr><th>Vhost</th><th>Seconds since last used</th></tr>\n", r);
- for (i = 0; i < arr->nelts; i++) {
- ap_rprintf(r, "<tr><td><pre>%s</pre></td><td><pre>%27s</pre></td></tr>\n",
- elts[i].key, elts[i].val);
- }
- ap_rputs("</table>\n", r);
}
} /* if (ap_extended_status && !short_report) */
else {
@@ -981,4 +955,3 @@ AP_DECLARE_MODULE(status) =
NULL, /* command table */
register_hooks /* register_hooks */
};
-
diff --git a/modules/http/byterange_filter.c b/modules/http/byterange_filter.c
index b2c678da..09f19565 100644
--- a/modules/http/byterange_filter.c
+++ b/modules/http/byterange_filter.c
@@ -82,8 +82,6 @@ static int ap_set_byterange(request_rec *r, apr_off_t clength,
int *overlaps, int *reversals)
{
const char *range;
- const char *if_range;
- const char *match;
const char *ct;
char *cur;
apr_array_header_t *merged;
@@ -135,20 +133,9 @@ static int ap_set_byterange(request_rec *r, apr_off_t clength,
/*
* Check the If-Range header for Etag or Date.
- * Note that this check will return false (as required) if either
- * of the two etags are weak.
*/
- if ((if_range = apr_table_get(r->headers_in, "If-Range"))) {
- if (if_range[0] == '"') {
- if (!(match = apr_table_get(r->headers_out, "Etag"))
- || (strcmp(if_range, match) != 0)) {
- return 0;
- }
- }
- else if (!(match = apr_table_get(r->headers_out, "Last-Modified"))
- || (strcmp(if_range, match) != 0)) {
- return 0;
- }
+ if (AP_CONDITION_NOMATCH == ap_condition_if_range(r, r->headers_out)) {
+ return 0;
}
range += 6;
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
index c4a9f342..24a939a4 100644
--- a/modules/http/http_filters.c
+++ b/modules/http/http_filters.c
@@ -265,7 +265,7 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
/* test lenp, because it gives another case we can handle */
else if (!lenp) {
/* Something that isn't in HTTP, unless some future
- * edition defines new transfer ecodings, is unsupported.
+ * edition defines new transfer encodings, is unsupported.
*/
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01585)
"Unknown Transfer-Encoding: %s", tenc);
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
index 2f786123..6705e704 100644
--- a/modules/http/http_protocol.c
+++ b/modules/http/http_protocol.c
@@ -306,13 +306,229 @@ AP_DECLARE(int) ap_set_keepalive(request_rec *r)
return 0;
}
+AP_DECLARE(ap_condition_e) ap_condition_if_match(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_match, *etag;
+
+ /* A server MUST use the strong comparison function (see section 13.3.3)
+ * to compare the entity tags in If-Match.
+ */
+ if ((if_match = apr_table_get(r->headers_in, "If-Match")) != NULL) {
+ if (if_match[0] == '*'
+        || ((etag = apr_table_get(headers, "ETag")) != NULL
+            && ap_find_etag_strong(r->pool, if_match, etag))) {
+ return AP_CONDITION_STRONG;
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_unmodified_since(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_unmodified;
+
+ if_unmodified = apr_table_get(r->headers_in, "If-Unmodified-Since");
+ if (if_unmodified) {
+ apr_int64_t mtime, reqtime;
+
+ apr_time_t ius = apr_time_sec(apr_date_parse_http(if_unmodified));
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ if ((ius != APR_DATE_BAD) && (mtime > ius)) {
+ if (reqtime < mtime + 60) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_none_match(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_nonematch, *etag;
+
+ if_nonematch = apr_table_get(r->headers_in, "If-None-Match");
+ if (if_nonematch != NULL) {
+
+ if (if_nonematch[0] == '*') {
+ return AP_CONDITION_STRONG;
+ }
+
+        /* See section 13.3.3 for rules on how to determine if two entity tags
+ * match. The weak comparison function can only be used with GET or HEAD
+ * requests.
+ */
+ if (r->method_number == M_GET) {
+ if ((etag = apr_table_get(headers, "ETag")) != NULL) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ if (ap_find_etag_strong(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ if (ap_find_etag_weak(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ }
+ }
+
+ else if ((etag = apr_table_get(headers, "ETag")) != NULL
+ && ap_find_etag_strong(r->pool, if_nonematch, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ return AP_CONDITION_NOMATCH;
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_modified_since(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_modified_since;
+
+ if ((if_modified_since = apr_table_get(r->headers_in, "If-Modified-Since"))
+ != NULL) {
+ apr_int64_t mtime;
+ apr_int64_t ims, reqtime;
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ ims = apr_time_sec(apr_date_parse_http(if_modified_since));
+
+ if (ims >= mtime && ims <= reqtime) {
+ if (reqtime < mtime + 60) {
+ if (apr_table_get(r->headers_in, "Range")) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_WEAK;
+ }
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
+AP_DECLARE(ap_condition_e) ap_condition_if_range(request_rec *r,
+ apr_table_t *headers)
+{
+ const char *if_range, *etag;
+
+ if ((if_range = apr_table_get(r->headers_in, "If-Range"))
+ && apr_table_get(r->headers_in, "Range")) {
+ if (if_range[0] == '"') {
+
+ if ((etag = apr_table_get(headers, "ETag"))
+ && !strcmp(if_range, etag)) {
+ return AP_CONDITION_STRONG;
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+
+ }
+ else {
+ apr_int64_t mtime;
+ apr_int64_t rtime, reqtime;
+
+ /* All of our comparisons must be in seconds, because that's the
+ * highest time resolution the HTTP specification allows.
+ */
+
+ mtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Last-Modified")));
+ if (mtime == APR_DATE_BAD) {
+ mtime = apr_time_sec(r->mtime ? r->mtime : apr_time_now());
+ }
+
+ reqtime = apr_time_sec(apr_date_parse_http(
+ apr_table_get(headers, "Date")));
+ if (!reqtime) {
+ reqtime = apr_time_sec(r->request_time);
+ }
+
+ rtime = apr_time_sec(apr_date_parse_http(if_range));
+
+ if (rtime == mtime) {
+ if (reqtime < mtime + 60) {
+ /* weak matches not allowed with Range requests */
+ return AP_CONDITION_NOMATCH;
+ }
+ else {
+ return AP_CONDITION_STRONG;
+ }
+ }
+ else {
+ return AP_CONDITION_NOMATCH;
+ }
+ }
+ }
+
+ return AP_CONDITION_NONE;
+}
+
AP_DECLARE(int) ap_meets_conditions(request_rec *r)
{
- const char *etag;
- const char *if_match, *if_modified_since, *if_unmodified, *if_nonematch;
- apr_time_t tmp_time;
- apr_int64_t mtime;
- int not_modified = 0;
+ int not_modified = -1; /* unset by default */
+ ap_condition_e cond;
/* Check for conditional requests --- note that we only want to do
* this if we are successful so far and we are not processing a
@@ -329,41 +545,30 @@ AP_DECLARE(int) ap_meets_conditions(request_rec *r)
return OK;
}
- etag = apr_table_get(r->headers_out, "ETag");
-
- /* All of our comparisons must be in seconds, because that's the
- * highest time resolution the HTTP specification allows.
- */
- /* XXX: we should define a "time unset" constant */
- tmp_time = ((r->mtime != 0) ? r->mtime : apr_time_now());
- mtime = apr_time_sec(tmp_time);
-
/* If an If-Match request-header field was given
* AND the field value is not "*" (meaning match anything)
* AND if our strong ETag does not match any entity tag in that field,
* respond with a status of 412 (Precondition Failed).
*/
- if ((if_match = apr_table_get(r->headers_in, "If-Match")) != NULL) {
- if (if_match[0] != '*'
- && (etag == NULL || etag[0] == 'W'
- || !ap_find_list_item(r->pool, if_match, etag))) {
- return HTTP_PRECONDITION_FAILED;
- }
+ cond = ap_condition_if_match(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ return HTTP_PRECONDITION_FAILED;
}
- else {
- /* Else if a valid If-Unmodified-Since request-header field was given
- * AND the requested resource has been modified since the time
- * specified in this field, then the server MUST
- * respond with a status of 412 (Precondition Failed).
- */
- if_unmodified = apr_table_get(r->headers_in, "If-Unmodified-Since");
- if (if_unmodified != NULL) {
- apr_time_t ius = apr_date_parse_http(if_unmodified);
- if ((ius != APR_DATE_BAD) && (mtime > apr_time_sec(ius))) {
- return HTTP_PRECONDITION_FAILED;
- }
- }
+ /* Else if a valid If-Unmodified-Since request-header field was given
+ * AND the requested resource has been modified since the time
+ * specified in this field, then the server MUST
+ * respond with a status of 412 (Precondition Failed).
+ */
+ cond = ap_condition_if_unmodified_since(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ return HTTP_PRECONDITION_FAILED;
}
/* If an If-None-Match request-header field was given
@@ -378,27 +583,17 @@ AP_DECLARE(int) ap_meets_conditions(request_rec *r)
* GET or HEAD allow weak etag comparison, all other methods require
* strong comparison. We can only use weak if it's not a range request.
*/
- if_nonematch = apr_table_get(r->headers_in, "If-None-Match");
- if (if_nonematch != NULL) {
+ cond = ap_condition_if_none_match(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
if (r->method_number == M_GET) {
- if (if_nonematch[0] == '*') {
+ if (not_modified) {
not_modified = 1;
}
- else if (etag != NULL) {
- if (apr_table_get(r->headers_in, "Range")) {
- not_modified = etag[0] != 'W'
- && ap_find_list_item(r->pool,
- if_nonematch, etag);
- }
- else {
- not_modified = ap_find_list_item(r->pool,
- if_nonematch, etag);
- }
- }
}
- else if (if_nonematch[0] == '*'
- || (etag != NULL
- && ap_find_list_item(r->pool, if_nonematch, etag))) {
+ else {
return HTTP_PRECONDITION_FAILED;
}
}
@@ -410,22 +605,27 @@ AP_DECLARE(int) ap_meets_conditions(request_rec *r)
* respond with a status of 304 (Not Modified).
* A date later than the server's current request time is invalid.
*/
- if (r->method_number == M_GET
- && (not_modified || !if_nonematch)
- && (if_modified_since =
- apr_table_get(r->headers_in,
- "If-Modified-Since")) != NULL) {
- apr_time_t ims_time;
- apr_int64_t ims, reqtime;
-
- ims_time = apr_date_parse_http(if_modified_since);
- ims = apr_time_sec(ims_time);
- reqtime = apr_time_sec(r->request_time);
+ cond = ap_condition_if_modified_since(r, r->headers_out);
+ if (AP_CONDITION_NOMATCH == cond) {
+ not_modified = 0;
+ }
+ else if (cond >= AP_CONDITION_WEAK) {
+ if (r->method_number == M_GET) {
+ if (not_modified) {
+ not_modified = 1;
+ }
+ }
+ }
- not_modified = ims >= mtime && ims <= reqtime;
+    /* If both an If-Range and a Range header are present, we must return
+     * 200 OK here; the byterange filter will convert it to a range response.
+ */
+ cond = ap_condition_if_range(r, r->headers_out);
+ if (cond > AP_CONDITION_NONE) {
+ return OK;
}
- if (not_modified) {
+ if (not_modified == 1) {
return HTTP_NOT_MODIFIED;
}
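The rewritten ap_meets_conditions() relies on the ordering of the ap_condition_e values: judging from the comparisons above, NONE sorts below NOMATCH, which sorts below WEAK and STRONG, so cond >= AP_CONDITION_WEAK means "header present and it matched at least weakly", while NOMATCH anywhere clears not_modified. For If-Match and If-Unmodified-Since a match means 412; for If-None-Match and If-Modified-Since it can mean 304. A sketch of that ordering with stand-in enumerators (the real definition lives in the httpd headers):

    #include <stdio.h>

    /* Stand-in for ap_condition_e; the order mirrors what the comparisons
     * in the patch imply: NONE < NOMATCH < WEAK < STRONG. */
    typedef enum { COND_NONE, COND_NOMATCH, COND_WEAK, COND_STRONG } cond_e;

    static const char *classify(cond_e cond)
    {
        if (cond == COND_NONE)
            return "header absent, condition does not apply";
        if (cond == COND_NOMATCH)
            return "header present, did not match";
        /* cond >= COND_WEAK: matched, at least weakly */
        return cond == COND_WEAK ? "weak match" : "strong match";
    }

    int main(void)
    {
        for (int c = COND_NONE; c <= COND_STRONG; c++)
            printf("%d: %s\n", c, classify((cond_e)c));
        return 0;
    }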
diff --git a/modules/loggers/mod_log_config.c b/modules/loggers/mod_log_config.c
index 31c53db6..e1a82041 100644
--- a/modules/loggers/mod_log_config.c
+++ b/modules/loggers/mod_log_config.c
@@ -597,6 +597,10 @@ static apr_time_t get_request_end_time(request_rec *r)
{
log_request_state *state = (log_request_state *)ap_get_module_config(r->request_config,
&log_config_module);
+ if (!state) {
+ state = apr_pcalloc(r->pool, sizeof(log_request_state));
+ ap_set_module_config(r->request_config, &log_config_module, state);
+ }
if (state->request_end_time == 0) {
state->request_end_time = apr_time_now();
}
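get_request_end_time() can now run before anything has attached the module's per-request state (an early or internally redirected log format lookup would be one way to get here; that trigger is a guess, not stated in the patch), so the state is created on demand. The get-or-create pattern with the real config accessors, sketched as a fragment of a hypothetical module:

    #include "httpd.h"
    #include "http_config.h"
    #include "apr_time.h"

    /* Hypothetical per-request state and module handle, for illustration only. */
    typedef struct { apr_time_t request_end_time; } my_state;
    extern module AP_MODULE_DECLARE_DATA my_module;

    static my_state *get_state(request_rec *r)
    {
        my_state *state = ap_get_module_config(r->request_config, &my_module);

        if (!state) {
            /* first caller allocates the record; later callers reuse it */
            state = apr_pcalloc(r->pool, sizeof(*state));
            ap_set_module_config(r->request_config, &my_module, state);
        }
        return state;
    }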
diff --git a/modules/loggers/mod_log_forensic.c b/modules/loggers/mod_log_forensic.c
index 1be12302..bb808e88 100644
--- a/modules/loggers/mod_log_forensic.c
+++ b/modules/loggers/mod_log_forensic.c
@@ -126,7 +126,7 @@ static char *log_escape(char *q, const char *e, const char *p)
if (test_char_table[*(unsigned char *)p]&T_ESCAPE_FORENSIC) {
ap_assert(q+2 < e);
*q++ = '%';
- sprintf(q, "%02x", *(unsigned char *)p);
+ ap_bin2hex(p, 1, q);
q += 2;
}
else
diff --git a/modules/lua/NWGNUmakefile b/modules/lua/NWGNUmakefile
index be418cfe..15e6c91e 100644
--- a/modules/lua/NWGNUmakefile
+++ b/modules/lua/NWGNUmakefile
@@ -26,10 +26,10 @@ include $(AP_WORK)/build/NWGNUhead.inc
XINCDIRS += \
$(APR)/include \
$(APRUTIL)/include \
- $(AP_WORK)/include \
- $(AP_WORK)/modules/database \
- $(AP_WORK)/modules/http \
- $(AP_WORK)/modules/ssl \
+ $(SRC)/include \
+ $(STDMOD)/database \
+ $(STDMOD)/http \
+ $(STDMOD)/ssl \
$(NWOS) \
$(LUASRC)/src \
$(EOLIST)
@@ -129,7 +129,7 @@ NLM_VERSION =
#
# If this is specified, it will override the default of 64K
#
-NLM_STACK_SIZE = 8192
+NLM_STACK_SIZE = 131072
#
@@ -181,6 +181,7 @@ FILES_nlm_objs = \
$(OBJDIR)/mod_lua.o \
$(OBJDIR)/lua_apr.o \
$(OBJDIR)/lua_config.o \
+ $(OBJDIR)/lua_passwd.o \
$(OBJDIR)/lua_request.o \
$(OBJDIR)/lua_vmprep.o \
$(OBJDIR)/lua_dbd.o \
diff --git a/modules/lua/README b/modules/lua/README
index c614b3e2..0be0adeb 100644
--- a/modules/lua/README
+++ b/modules/lua/README
@@ -38,13 +38,10 @@
* Task List
** TODO Use r->file to determine file, doing rewriting in translate_name
-** TODO Change to controlling lifecycle by passing in a pool?
- Need to determine how to handle server scoped then!
** TODO Provide means to get useful output from lua errors in response body
Probably have to put it on the vm spec for pre-handler errors, as
it is pre-handler, will prolly be on the request_config somewhere,
but sometimes cannot put there, so... fun
-** TODO Filters
** TODO Mapping in the server_rec
** TODO Connection scoped vms
** TODO Figure out how reentrancy works regarding filter chain stuff.
@@ -52,14 +49,10 @@
** TODO Flesh out apw_*getvm for each flavor we allow
** TODO Rework apw_sgetvm to use the create_vm stuff like apw_rgetvm
** TODO apw_rgetvm needs to handle connection scoped vms
-** TODO options in server scoped vms (ie, min and max vm counts)
** TODO provide means to implement authn and authz providers
** TODO: Flatten LuaHook* to LuaHook phase file fn ?
-** TODO: Lua and ap_expr integration in one or both directions
** TODO: document or remove block sections
** TODO: test per-dir behavior of block sections
-** TODO: Catch-up documentation on r: methods
-** TODO: 500 errors instead of 404 with AddHandler lua-script
** TODO: Suppress internal details (fs path to scripts, etc) in error responses
* License
@@ -82,3 +75,6 @@
** Brian Akins
** Justin Erenkrantz
** Philip M. Gollucci
+** Stefan Fritsch
+** Eric Covener
+** Daniel Gruno
diff --git a/modules/lua/config.m4 b/modules/lua/config.m4
index 2d1ac052..8a7a11bf 100644
--- a/modules/lua/config.m4
+++ b/modules/lua/config.m4
@@ -136,7 +136,7 @@ else
fi
])
-lua_objects="lua_apr.lo lua_config.lo mod_lua.lo lua_request.lo lua_vmprep.lo lua_dbd.lo"
+lua_objects="lua_apr.lo lua_config.lo mod_lua.lo lua_request.lo lua_vmprep.lo lua_dbd.lo lua_passwd.lo"
APACHE_MODULE(lua, Apache Lua Framework, $lua_objects, , , [
CHECK_LUA()
diff --git a/modules/lua/lua_apr.c b/modules/lua/lua_apr.c
index c93ea9b1..8a1dcf68 100644
--- a/modules/lua/lua_apr.c
+++ b/modules/lua/lua_apr.c
@@ -14,32 +14,20 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "apr.h"
-#include "apr_tables.h"
#include "mod_lua.h"
#include "lua_apr.h"
-/**
- * make a userdata out of a C pointer, and vice versa
- * instead of using lightuserdata
- */
-#ifndef lua_boxpointer
-#define lua_boxpointer(L,u) (*(void **)(lua_newuserdata(L, sizeof(void *))) = (u))
-#define lua_unboxpointer(L,i) (*(void **)(lua_touserdata(L, i)))
-#endif
-
-
-AP_LUA_DECLARE(apr_table_t*) ap_lua_check_apr_table(lua_State *L, int index)
+apr_table_t *ap_lua_check_apr_table(lua_State *L, int index)
{
apr_table_t *t;
luaL_checkudata(L, index, "Apr.Table");
- t = (apr_table_t *) lua_unboxpointer(L, index);
+ t = lua_unboxpointer(L, index);
return t;
}
-AP_LUA_DECLARE(void) ap_lua_push_apr_table(lua_State *L, apr_table_t *t)
+void ap_lua_push_apr_table(lua_State *L, apr_table_t *t)
{
lua_boxpointer(L, t);
luaL_getmetatable(L, "Apr.Table");
@@ -48,9 +36,9 @@ AP_LUA_DECLARE(void) ap_lua_push_apr_table(lua_State *L, apr_table_t *t)
static int lua_table_set(lua_State *L)
{
- apr_table_t *t = ap_lua_check_apr_table(L, 1);
- const char *key = luaL_checkstring(L, 2);
- const char *val = luaL_checkstring(L, 3);
+ apr_table_t *t = ap_lua_check_apr_table(L, 1);
+ const char *key = luaL_checkstring(L, 2);
+ const char *val = luaL_checkstring(L, 3);
apr_table_set(t, key, val);
return 0;
@@ -58,9 +46,9 @@ static int lua_table_set(lua_State *L)
static int lua_table_get(lua_State *L)
{
- apr_table_t *t = ap_lua_check_apr_table(L, 1);
- const char *key = luaL_checkstring(L, 2);
- const char *val = apr_table_get(t, key);
+ apr_table_t *t = ap_lua_check_apr_table(L, 1);
+ const char *key = luaL_checkstring(L, 2);
+ const char *val = apr_table_get(t, key);
lua_pushstring(L, val);
return 1;
}
@@ -72,7 +60,7 @@ static const luaL_Reg lua_table_methods[] = {
};
-AP_LUA_DECLARE(int) ap_lua_init(lua_State *L, apr_pool_t *p)
+int ap_lua_init(lua_State *L, apr_pool_t *p)
{
luaL_newmetatable(L, "Apr.Table");
luaL_register(L, "apr_table", lua_table_methods);
@@ -88,3 +76,6 @@ AP_LUA_DECLARE(int) ap_lua_init(lua_State *L, apr_pool_t *p)
return 0;
}
+
+
+
diff --git a/modules/lua/lua_apr.h b/modules/lua/lua_apr.h
index c66cdde9..8a1428ff 100644
--- a/modules/lua/lua_apr.h
+++ b/modules/lua/lua_apr.h
@@ -18,8 +18,19 @@
#ifndef _LUA_APR_H_
#define _LUA_APR_H_
-AP_LUA_DECLARE(int) ap_lua_init(lua_State *L, apr_pool_t * p);
-AP_LUA_DECLARE(apr_table_t*) ap_lua_check_apr_table(lua_State *L, int index);
-AP_LUA_DECLARE(void) ap_lua_push_apr_table(lua_State *L, apr_table_t *t);
+#include "scoreboard.h"
+#include "http_main.h"
+#include "ap_mpm.h"
+#include "apr_md5.h"
+#include "apr_sha1.h"
+#include "apr_poll.h"
+#include "apr.h"
+#include "apr_tables.h"
+#include "apr_base64.h"
+
+
+int ap_lua_init(lua_State *L, apr_pool_t * p);
+apr_table_t *ap_lua_check_apr_table(lua_State *L, int index);
+void ap_lua_push_apr_table(lua_State *L, apr_table_t *t);
#endif /* !_LUA_APR_H_ */
diff --git a/modules/lua/lua_config.c b/modules/lua/lua_config.c
index 07dd932b..bb082380 100644
--- a/modules/lua/lua_config.c
+++ b/modules/lua/lua_config.c
@@ -51,7 +51,7 @@ static int apl_toscope(const char *name)
return AP_LUA_SCOPE_ONCE;
}
-AP_LUA_DECLARE(apr_status_t) ap_lua_map_handler(ap_lua_dir_cfg *cfg,
+apr_status_t ap_lua_map_handler(ap_lua_dir_cfg *cfg,
const char *file,
const char *function,
const char *pattern,
@@ -257,7 +257,7 @@ static const struct luaL_Reg cmd_methods[] = {
{NULL, NULL}
};
-AP_LUA_DECLARE(void) ap_lua_load_config_lmodule(lua_State *L)
+void ap_lua_load_config_lmodule(lua_State *L)
{
luaL_newmetatable(L, "Apache2.DirConfig"); /* [metatable] */
lua_pushvalue(L, -1);
diff --git a/modules/lua/lua_config.h b/modules/lua/lua_config.h
index d2689da1..8a778ad8 100644
--- a/modules/lua/lua_config.h
+++ b/modules/lua/lua_config.h
@@ -20,9 +20,9 @@
#ifndef _APL_CONFIG_H_
#define _APL_CONFIG_H_
-AP_LUA_DECLARE(void) ap_lua_load_config_lmodule(lua_State *L);
+void ap_lua_load_config_lmodule(lua_State *L);
-AP_LUA_DECLARE(apr_status_t) ap_lua_map_handler(ap_lua_dir_cfg *cfg,
+apr_status_t ap_lua_map_handler(ap_lua_dir_cfg *cfg,
const char *file,
const char *function,
const char *pattern,
diff --git a/modules/lua/lua_dbd.c b/modules/lua/lua_dbd.c
index 350ec247..501156f8 100644
--- a/modules/lua/lua_dbd.c
+++ b/modules/lua/lua_dbd.c
@@ -16,7 +16,6 @@
*/
#include "mod_lua.h"
-#include "lua_apr.h"
#include "lua_dbd.h"
APLOG_USE_MODULE(lua);
@@ -377,7 +376,7 @@ int lua_db_prepared_select(lua_State *L)
st = (lua_db_prepared_statement*) lua_topointer(L, -1);
/* Check if we got enough variables passed on to us.
- * This, of course, only works for prepped statements made through lua. */
+ * This, of course, only works for prepared statements made through lua. */
have = lua_gettop(L) - 2;
if (st->variables != -1 && have < st->variables ) {
lua_pushboolean(L, 0);
@@ -468,7 +467,7 @@ int lua_db_prepared_query(lua_State *L)
st = (lua_db_prepared_statement*) lua_topointer(L, -1);
/* Check if we got enough variables passed on to us.
- * This, of course, only works for prepped statements made through lua. */
+ * This, of course, only works for prepared statements made through lua. */
have = lua_gettop(L) - 2;
if (st->variables != -1 && have < st->variables ) {
lua_pushboolean(L, 0);
@@ -704,7 +703,7 @@ static lua_db_handle* lua_push_db_handle(lua_State *L, request_rec* r, int type,
supported.
=============================================================================
*/
-AP_LUA_DECLARE(int) lua_db_acquire(lua_State *L)
+int lua_db_acquire(lua_State *L)
{
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
const char *type;
diff --git a/modules/lua/lua_dbd.h b/modules/lua/lua_dbd.h
index 6f74efd0..566204b1 100644
--- a/modules/lua/lua_dbd.h
+++ b/modules/lua/lua_dbd.h
@@ -50,7 +50,7 @@ typedef struct {
lua_db_handle *db;
} lua_db_prepared_statement;
-AP_LUA_DECLARE(int) lua_db_acquire(lua_State* L);
+int lua_db_acquire(lua_State* L);
int lua_db_escape(lua_State* L);
int lua_db_close(lua_State* L);
int lua_db_prepare(lua_State* L);
diff --git a/modules/lua/lua_passwd.c b/modules/lua/lua_passwd.c
new file mode 100644
index 00000000..ad865362
--- /dev/null
+++ b/modules/lua/lua_passwd.c
@@ -0,0 +1,178 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "lua_passwd.h"
+#include "apr_strings.h"
+#include "apr_errno.h"
+
+#if APR_HAVE_STDIO_H
+#include <stdio.h>
+#endif
+
+#include "apr_md5.h"
+#include "apr_sha1.h"
+
+#if APR_HAVE_TIME_H
+#include <time.h>
+#endif
+#if APR_HAVE_CRYPT_H
+#include <crypt.h>
+#endif
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#if APR_HAVE_IO_H
+#include <io.h>
+#endif
+
+static int generate_salt(char *s, size_t size, const char **errstr,
+ apr_pool_t *pool)
+{
+ unsigned char rnd[32];
+ static const char itoa64[] =
+ "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+ apr_size_t n;
+ unsigned int val = 0, bits = 0;
+ apr_status_t rv;
+
+ n = (size * 6 + 7)/8;
+ if (n > sizeof(rnd)) {
+ *errstr = apr_psprintf(pool, "generate_salt(): BUG: Buffer too small");
+ return ERR_RANDOM;
+ }
+ rv = apr_generate_random_bytes(rnd, n);
+ if (rv) {
+ *errstr = apr_psprintf(pool, "Unable to generate random bytes: %pm",
+ &rv);
+ return ERR_RANDOM;
+ }
+ n = 0;
+ while (size > 0) {
+ if (bits < 6) {
+ val |= (rnd[n++] << bits);
+ bits += 8;
+ }
+ *s++ = itoa64[val & 0x3f];
+ size--;
+ val >>= 6;
+ bits -= 6;
+ }
+ *s = '\0';
+ return 0;
+}
+
+/*
+ * Make a password record from the given information. A zero return
+ * indicates success; on failure, ctx->errstr points to the error message.
+ */
+int mk_password_hash(passwd_ctx *ctx)
+{
+ char *pw;
+ char salt[16];
+ apr_status_t rv;
+ int ret = 0;
+#if CRYPT_ALGO_SUPPORTED
+ char *cbuf;
+#endif
+
+ pw = ctx->passwd;
+ switch (ctx->alg) {
+ case ALG_APSHA:
+ /* XXX out >= 28 + strlen(sha1) chars - fixed len SHA */
+ apr_sha1_base64(pw, strlen(pw), ctx->out);
+ break;
+
+ case ALG_APMD5:
+ ret = generate_salt(salt, 8, &ctx->errstr, ctx->pool);
+ if (ret != 0) {
+ ret = ERR_GENERAL;
+ break;
+ }
+ rv = apr_md5_encode(pw, salt, ctx->out, ctx->out_len);
+ if (rv != APR_SUCCESS) {
+ ctx->errstr = apr_psprintf(ctx->pool,
+ "could not encode password: %pm", &rv);
+ ret = ERR_GENERAL;
+ }
+ break;
+
+#if CRYPT_ALGO_SUPPORTED
+ case ALG_CRYPT:
+ ret = generate_salt(salt, 8, &ctx->errstr, ctx->pool);
+ if (ret != 0)
+ break;
+ cbuf = crypt(pw, salt);
+ if (cbuf == NULL) {
+ rv = APR_FROM_OS_ERROR(errno);
+ ctx->errstr = apr_psprintf(ctx->pool, "crypt() failed: %pm", &rv);
+ ret = ERR_PWMISMATCH;
+ break;
+ }
+
+ apr_cpystrn(ctx->out, cbuf, ctx->out_len - 1);
+ if (strlen(pw) > 8) {
+ char *truncpw = apr_pstrdup(ctx->pool, pw);
+ truncpw[8] = '\0';
+ if (!strcmp(ctx->out, crypt(truncpw, salt))) {
+ ctx->errstr = apr_psprintf(ctx->pool,
+ "Warning: Password truncated to 8 "
+ "characters by CRYPT algorithm.");
+ }
+ memset(truncpw, '\0', strlen(pw));
+ }
+ break;
+#endif /* CRYPT_ALGO_SUPPORTED */
+
+#if BCRYPT_ALGO_SUPPORTED
+ case ALG_BCRYPT:
+ rv = apr_generate_random_bytes((unsigned char*)salt, 16);
+ if (rv != APR_SUCCESS) {
+ ctx->errstr = apr_psprintf(ctx->pool, "Unable to generate random "
+ "bytes: %pm", &rv);
+ ret = ERR_RANDOM;
+ break;
+ }
+
+ if (ctx->cost == 0)
+ ctx->cost = BCRYPT_DEFAULT_COST;
+ rv = apr_bcrypt_encode(pw, ctx->cost, (unsigned char*)salt, 16,
+ ctx->out, ctx->out_len);
+ if (rv != APR_SUCCESS) {
+ ctx->errstr = apr_psprintf(ctx->pool, "Unable to encode with "
+ "bcrypt: %pm", &rv);
+ ret = ERR_PWMISMATCH;
+ break;
+ }
+ break;
+#endif /* BCRYPT_ALGO_SUPPORTED */
+
+ default:
+ ctx->errstr = apr_psprintf(ctx->pool,
+ "mk_password_hash(): unsupported algorithm %d",
+ ctx->alg);
+ ret = ERR_GENERAL;
+ }
+ memset(pw, '\0', strlen(pw));
+ return ret;
+}
+
+
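generate_salt() above pulls six bits at a time out of a buffer of random bytes and maps each group onto the 64-character itoa64 alphabet, so size output characters need (size*6 + 7)/8 random bytes. The same bit loop in standalone form, with rand() standing in for apr_generate_random_bytes() (do not use rand() for real salts):

    #include <stdio.h>
    #include <stdlib.h>

    static const char itoa64[] =
        "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

    /* Emit `size` salt characters, six random bits per character. */
    static void make_salt(char *s, size_t size)
    {
        unsigned char rnd[32];
        size_t need = (size * 6 + 7) / 8;    /* bytes needed for size*6 bits */
        size_t n = 0;
        unsigned int val = 0, bits = 0;

        for (size_t i = 0; i < need && i < sizeof(rnd); i++)
            rnd[i] = (unsigned char)rand();  /* stand-in for a real CSPRNG */

        while (size > 0) {
            if (bits < 6) {                  /* refill the bit reservoir */
                val |= (unsigned int)rnd[n++] << bits;
                bits += 8;
            }
            *s++ = itoa64[val & 0x3f];       /* emit the low six bits */
            size--;
            val >>= 6;
            bits -= 6;
        }
        *s = '\0';
    }

    int main(void)
    {
        char salt[9];
        make_salt(salt, 8);                  /* 8 chars, as the APMD5 case uses */
        printf("salt: %s\n", salt);
        return 0;
    }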
diff --git a/modules/lua/lua_passwd.h b/modules/lua/lua_passwd.h
new file mode 100644
index 00000000..797556bf
--- /dev/null
+++ b/modules/lua/lua_passwd.h
@@ -0,0 +1,91 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _LUA_PASSWD_H
+#define _LUA_PASSWD_H
+
+#include "apr.h"
+#include "apr_lib.h"
+#include "apr_strings.h"
+#include "apr_errno.h"
+#include "apr_file_io.h"
+#include "apr_general.h"
+#include "apr_version.h"
+#if !APR_VERSION_AT_LEAST(2,0,0)
+#include "apu_version.h"
+#endif
+
+#define MAX_PASSWD_LEN 256
+
+#define ALG_APMD5 0
+#define ALG_APSHA 1
+#define ALG_BCRYPT 2
+#define ALG_CRYPT 3
+
+#define BCRYPT_DEFAULT_COST 5
+
+#define ERR_FILEPERM 1
+#define ERR_SYNTAX 2
+#define ERR_PWMISMATCH 3
+#define ERR_INTERRUPTED 4
+#define ERR_OVERFLOW 5
+#define ERR_BADUSER 6
+#define ERR_INVALID 7
+#define ERR_RANDOM 8
+#define ERR_GENERAL 9
+#define ERR_ALG_NOT_SUPP 10
+
+#if defined(WIN32) || defined(NETWARE)
+#define CRYPT_ALGO_SUPPORTED 0
+#define PLAIN_ALGO_SUPPORTED 1
+#else
+#define CRYPT_ALGO_SUPPORTED 1
+#define PLAIN_ALGO_SUPPORTED 0
+#endif
+
+#if APR_VERSION_AT_LEAST(2,0,0) || \
+ (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 5)
+#define BCRYPT_ALGO_SUPPORTED 1
+#else
+#define BCRYPT_ALGO_SUPPORTED 0
+#endif
+
+typedef struct passwd_ctx passwd_ctx;
+
+struct passwd_ctx {
+ apr_pool_t *pool;
+ const char *errstr;
+ char *out;
+ apr_size_t out_len;
+// const char *passwd;
+ char *passwd;
+ int alg;
+ int cost;
+};
+
+
+/*
+ * The following functions return zero on success; otherwise, one of
+ * the ERR_* codes is returned and an error message is stored in ctx->errstr.
+ */
+
+/*
+ * Make a password record from the given information.
+ */
+int mk_password_hash(passwd_ctx *ctx);
+
+#endif /* _LUA_PASSWD_H */
+
diff --git a/modules/lua/lua_request.c b/modules/lua/lua_request.c
index 63b192dc..c2cfb535 100644
--- a/modules/lua/lua_request.c
+++ b/modules/lua/lua_request.c
@@ -16,23 +16,38 @@
*/
#include "mod_lua.h"
-#include "util_script.h"
#include "lua_apr.h"
#include "lua_dbd.h"
+#include "lua_passwd.h"
+#include "scoreboard.h"
+#include "util_md5.h"
+#include "util_script.h"
+#include "util_varbuf.h"
+#include "apr_date.h"
+#include "apr_pools.h"
+#include "apr_thread_mutex.h"
+
+#include <lua.h>
+
+extern apr_thread_mutex_t* lua_ivm_mutex;
APLOG_USE_MODULE(lua);
+#define POST_MAX_VARS 500
+
+#ifndef MODLUA_MAX_REG_MATCH
+#define MODLUA_MAX_REG_MATCH 25
+#endif
typedef char *(*req_field_string_f) (request_rec * r);
typedef int (*req_field_int_f) (request_rec * r);
typedef apr_table_t *(*req_field_apr_table_f) (request_rec * r);
+
void ap_lua_rstack_dump(lua_State *L, request_rec *r, const char *msg)
{
int i;
int top = lua_gettop(L);
-
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01484) "Lua Stack Dump: [%s]", msg);
-
for (i = 1; i <= top; i++) {
int t = lua_type(L, i);
switch (t) {
@@ -153,6 +168,87 @@ static int req_aprtable2luatable_cb(void *l, const char *key,
return 1;
}
+
+/*
+ =======================================================================================================================
+ lua_read_body(request_rec *r, const char **rbuf, apr_off_t *size): Reads any additional form data sent in POST/PUT
+ requests. Used for multipart POST data.
+ =======================================================================================================================
+ */
+static int lua_read_body(request_rec *r, const char **rbuf, apr_off_t *size)
+{
+ int rc = OK;
+
+ if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR))) {
+ return (rc);
+ }
+ if (ap_should_client_block(r)) {
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ char argsbuffer[HUGE_STRING_LEN];
+ apr_off_t rsize, len_read, rpos = 0;
+ apr_off_t length = r->remaining;
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+ *rbuf = (const char *) apr_pcalloc(r->pool, (apr_size_t) (length + 1));
+ *size = length;
+ while ((len_read = ap_get_client_block(r, argsbuffer, sizeof(argsbuffer))) > 0) {
+ if ((rpos + len_read) > length) {
+ rsize = length - rpos;
+ }
+ else {
+ rsize = len_read;
+ }
+
+ memcpy((char *) *rbuf + rpos, argsbuffer, (size_t) rsize);
+ rpos += rsize;
+ }
+ }
+
+ return (rc);
+}
+
+
+/*
+ * =======================================================================================================================
+ * lua_write_body: Reads any additional form data sent in POST/PUT requests
+ * and writes to a file.
+ * =======================================================================================================================
+ */
+static apr_status_t lua_write_body(request_rec *r, apr_file_t *file, apr_off_t *size)
+{
+ apr_status_t rc = OK;
+
+ if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR)))
+ return rc;
+ if (ap_should_client_block(r)) {
+ char argsbuffer[HUGE_STRING_LEN];
+ apr_off_t rsize,
+ len_read,
+ rpos = 0;
+ apr_off_t length = r->remaining;
+ apr_size_t written;
+
+ *size = length;
+ while ((len_read =
+ ap_get_client_block(r, argsbuffer,
+ sizeof(argsbuffer))) > 0) {
+ if ((rpos + len_read) > length)
+ rsize = (apr_size_t) length - rpos;
+ else
+ rsize = len_read;
+
+ rc = apr_file_write_full(file, argsbuffer, (apr_size_t) rsize,
+ &written);
+ if (written != rsize || rc != OK)
+ return APR_ENOSPC;
+ rpos += rsize;
+ }
+ }
+
+ return rc;
+}
+
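lua_read_body() and lua_write_body() above use the classic pre-filter API for draining a request body: declare the read policy with ap_setup_client_block(), ask ap_should_client_block() whether a body is coming, then loop over ap_get_client_block(). The same skeleton as a generic handler fragment (module boilerplate omitted; drain_body and its callback are illustrative names):

    #include "httpd.h"
    #include "http_protocol.h"

    /* Drain the request body in HUGE_STRING_LEN chunks and hand each chunk
     * to the callback; returns the number of bytes seen, or -1 on error. */
    static apr_off_t drain_body(request_rec *r,
                                void (*chunk_cb)(const char *buf, apr_size_t len))
    {
        char buffer[HUGE_STRING_LEN];
        apr_off_t total = 0;
        long len;

        if (ap_setup_client_block(r, REQUEST_CHUNKED_ERROR) != OK)
            return -1;                   /* e.g. unsupported Transfer-Encoding */

        if (!ap_should_client_block(r))  /* no body to read */
            return 0;

        while ((len = ap_get_client_block(r, buffer, sizeof(buffer))) > 0) {
            chunk_cb(buffer, (apr_size_t)len);
            total += len;
        }
        return len < 0 ? -1 : total;
    }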
/* r:parseargs() returning a lua table */
static int req_parseargs(lua_State *L)
{
@@ -165,7 +261,7 @@ static int req_parseargs(lua_State *L)
return 2; /* [table<string, string>, table<string, array<string>>] */
}
-/* r:parsebody() returning a lua table */
+/* r:parsebody(): Parses regular (url-encoded) or multipart POST data and returns two tables */
static int req_parsebody(lua_State *L)
{
apr_array_header_t *pairs;
@@ -173,26 +269,127 @@ static int req_parsebody(lua_State *L)
int res;
apr_size_t size;
apr_size_t max_post_size;
- char *buffer;
+ char *multipart;
+ const char *contentType;
request_rec *r = ap_lua_check_request_rec(L, 1);
max_post_size = (apr_size_t) luaL_optint(L, 2, MAX_STRING_LEN);
+ multipart = apr_pcalloc(r->pool, 256);
+ contentType = apr_table_get(r->headers_in, "Content-Type");
lua_newtable(L);
- lua_newtable(L); /* [table, table] */
- res = ap_parse_form_data(r, NULL, &pairs, -1, max_post_size);
- if (res == OK) {
- while(pairs && !apr_is_empty_array(pairs)) {
- ap_form_pair_t *pair = (ap_form_pair_t *) apr_array_pop(pairs);
- apr_brigade_length(pair->value, 1, &len);
- size = (apr_size_t) len;
- buffer = apr_palloc(r->pool, size + 1);
- apr_brigade_flatten(pair->value, buffer, &size);
- buffer[len] = 0;
- req_aprtable2luatable_cb(L, pair->name, buffer);
+ lua_newtable(L); /* [table, table] */
+ if (contentType != NULL && (sscanf(contentType, "multipart/form-data; boundary=%250c", multipart) == 1)) {
+ char *buffer, *key, *filename;
+ char *start = 0, *end = 0, *crlf = 0;
+ const char *data;
+ int i;
+ size_t vlen = 0;
+ size_t len = 0;
+ if (lua_read_body(r, &data, (apr_off_t*) &size) != OK) {
+ return 2;
+ }
+ len = strlen(multipart);
+ i = 0;
+        for (start = strstr((char *) data, multipart);
+             start != start + size;
+             start = end) {
+ i++;
+ if (i == POST_MAX_VARS) break;
+ end = strstr((char *) (start + 1), multipart);
+ if (!end) end = start + size;
+ crlf = strstr((char *) start, "\r\n\r\n");
+ if (!crlf) break;
+ key = (char *) apr_pcalloc(r->pool, 256);
+ filename = (char *) apr_pcalloc(r->pool, 256);
+ vlen = end - crlf - 8;
+ buffer = (char *) apr_pcalloc(r->pool, vlen+1);
+ memcpy(buffer, crlf + 4, vlen);
+ sscanf(start + len + 2,
+ "Content-Disposition: form-data; name=\"%255[^\"]\"; filename=\"%255[^\"]\"",
+ key, filename);
+ if (strlen(key)) {
+ req_aprtable2luatable_cb(L, key, buffer);
+ }
+ }
+ }
+ else {
+ char *buffer;
+ res = ap_parse_form_data(r, NULL, &pairs, -1, max_post_size);
+ if (res == OK) {
+ while(pairs && !apr_is_empty_array(pairs)) {
+ ap_form_pair_t *pair = (ap_form_pair_t *) apr_array_pop(pairs);
+ apr_brigade_length(pair->value, 1, &len);
+ size = (apr_size_t) len;
+ buffer = apr_palloc(r->pool, size + 1);
+ apr_brigade_flatten(pair->value, buffer, &size);
+ buffer[len] = 0;
+ req_aprtable2luatable_cb(L, pair->name, buffer);
+ }
}
}
return 2; /* [table<string, string>, table<string, array<string>>] */
}
+
+/*
+ * lua_ap_requestbody; r:requestbody([filename]) - Reads or stores the request
+ * body
+ */
+static int lua_ap_requestbody(lua_State *L)
+{
+ const char *filename;
+ request_rec *r;
+ apr_off_t maxSize;
+
+ r = ap_lua_check_request_rec(L, 1);
+ filename = luaL_optstring(L, 2, 0);
+ maxSize = luaL_optint(L, 3, 0);
+
+ if (r) {
+ apr_off_t size;
+ if (maxSize > 0 && r->remaining > maxSize) {
+ lua_pushnil(L);
+ lua_pushliteral(L, "Request body was larger than the permitted size.");
+ return 2;
+ }
+ if (r->method_number != M_POST && r->method_number != M_PUT)
+ return (0);
+ if (!filename) {
+ const char *data;
+
+ if (lua_read_body(r, &data, &size) != OK)
+ return (0);
+
+ lua_pushlstring(L, data, (size_t) size);
+ lua_pushinteger(L, (lua_Integer) size);
+ return (2);
+ } else {
+ apr_status_t rc;
+ apr_file_t *file;
+
+ rc = apr_file_open(&file, filename, APR_CREATE | APR_FOPEN_WRITE,
+ APR_FPROT_OS_DEFAULT, r->pool);
+ lua_settop(L, 0);
+ if (rc == APR_SUCCESS) {
+ rc = lua_write_body(r, file, &size);
+ apr_file_close(file);
+ if (rc != OK) {
+ lua_pushboolean(L, 0);
+ return 1;
+ }
+ lua_pushinteger(L, (lua_Integer) size);
+ return (1);
+ } else
+ lua_pushboolean(L, 0);
+ return (1);
+ }
+ }
+
+ return (0);
+}
+
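+/*
+ * Usage sketch (illustrative Lua; the file path is made up):
+ *
+ *   local data, size = r:requestbody()             -- read the body into memory
+ *   local written = r:requestbody("/tmp/body.bin") -- or spool it to a file
+ *
+ * Only POST and PUT requests carry a body here; other methods return nothing.
+ */
+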
/* wrap ap_rputs as r:puts(String) */
static int req_puts(lua_State *L)
{
@@ -212,10 +409,12 @@ static int req_write(lua_State *L)
{
request_rec *r = ap_lua_check_request_rec(L, 1);
size_t n;
+ int rv;
const char *buf = luaL_checklstring(L, 2, &n);
- ap_rwrite((void *) buf, n, r);
- return 0;
+ rv = ap_rwrite((void *) buf, n, r);
+ lua_pushinteger(L, rv);
+ return 1;
}
/* r:addoutputfilter(name|function) */
@@ -246,6 +445,7 @@ static int req_escape_html(lua_State *L)
lua_pushstring(L, ap_escape_html(r->pool, s));
return 1;
}
+
/* wrap optional ssl_var_lookup as r:ssl_var_lookup(String) */
static int req_ssl_var_lookup(lua_State *L)
{
@@ -256,6 +456,7 @@ static int req_ssl_var_lookup(lua_State *L)
lua_pushstring(L, res);
return 1;
}
+
 /* BEGIN dispatch methods for request_rec fields */
/* not really a field, but we treat it like one */
@@ -372,6 +573,11 @@ static const char *req_useragent_ip_field(request_rec *r)
return r->useragent_ip;
}
+static int req_remaining_field(request_rec *r)
+{
+ return r->remaining;
+}
+
static int req_status_field(request_rec *r)
{
return r->status;
@@ -412,7 +618,1095 @@ static int req_ssl_is_https_field(request_rec *r)
return ap_lua_ssl_is_https(r->connection);
}
-/* END dispatch mathods for request_rec fields */
+static int req_ap_get_server_port(request_rec *r)
+{
+ return (int) ap_get_server_port(r);
+}
+
+static int lua_ap_rflush(lua_State *L)
+{
+ int returnValue;
+ request_rec *r;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ returnValue = ap_rflush(r);
+ lua_pushboolean(L, (returnValue == 0));
+ return 1;
+}
+
+
+static const char* lua_ap_options(request_rec* r)
+{
+ int opts;
+ opts = ap_allow_options(r);
+    return apr_psprintf(r->pool, "%s %s %s %s %s %s",
+                        (opts&OPT_INDEXES) ? "Indexes" : "",
+                        (opts&OPT_INCLUDES) ? "Includes" : "",
+                        (opts&OPT_SYM_LINKS) ? "FollowSymLinks" : "",
+                        (opts&OPT_EXECCGI) ? "ExecCGI" : "",
+                        (opts&OPT_MULTI) ? "MultiViews" : "",
+                        (opts&OPT_ALL) == OPT_ALL ? "All" : "");
+}
+
+static const char* lua_ap_allowoverrides(request_rec* r)
+{
+ int opts;
+ opts = ap_allow_overrides(r);
+    return apr_psprintf(r->pool, "%s %s %s %s %s %s",
+                        (opts&OR_NONE) ? "None" : "",
+                        (opts&OR_LIMIT) ? "Limit" : "",
+                        (opts&OR_OPTIONS) ? "Options" : "",
+                        (opts&OR_FILEINFO) ? "FileInfo" : "",
+                        (opts&OR_AUTHCFG) ? "AuthCfg" : "",
+                        (opts&OR_INDEXES) ? "Indexes" : "");
+}
+
+static int lua_ap_started(request_rec* r)
+{
+ return (int)(ap_scoreboard_image->global->restart_time / 1000000);
+}
+
+static const char* lua_ap_basic_auth_pw(request_rec* r)
+{
+ const char* pw = NULL;
+ ap_get_basic_auth_pw(r, &pw);
+ return pw ? pw : "";
+}
+
+static int lua_ap_limit_req_body(request_rec* r)
+{
+ return (int) ap_get_limit_req_body(r);
+}
+
+static int lua_ap_is_initial_req(request_rec *r)
+{
+ return ap_is_initial_req(r);
+}
+
+static int lua_ap_some_auth_required(request_rec *r)
+{
+ return ap_some_auth_required(r);
+}
+
+static int lua_ap_sendfile(lua_State *L)
+{
+
+ apr_finfo_t file_info;
+ const char *filename;
+ request_rec *r;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ r = ap_lua_check_request_rec(L, 1);
+ filename = lua_tostring(L, 2);
+ apr_stat(&file_info, filename, APR_FINFO_MIN, r->pool);
+ if (file_info.filetype == APR_NOFILE || file_info.filetype == APR_DIR) {
+ lua_pushboolean(L, 0);
+ }
+ else {
+ apr_size_t sent;
+ apr_status_t rc;
+ apr_file_t *file;
+
+ rc = apr_file_open(&file, filename, APR_READ, APR_OS_DEFAULT,
+ r->pool);
+ if (rc == APR_SUCCESS) {
+ ap_send_fd(file, r, 0, (apr_size_t)file_info.size, &sent);
+ apr_file_close(file);
+ lua_pushinteger(L, sent);
+ }
+ else {
+ lua_pushboolean(L, 0);
+ }
+ }
+
+ return (1);
+}
+
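+/*
+ * Usage sketch (illustrative Lua; the path is made up):
+ *
+ *   local sent = r:sendfile("/var/www/downloads/big.iso")
+ *   if not sent then r:puts("no such file") end
+ *
+ * Returns the number of bytes sent, or false for missing files and directories.
+ */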
+
+/*
+ * lua_apr_b64encode; r:base64_encode(string) - Encodes a string into Base64
+ * format
+ */
+static int lua_apr_b64encode(lua_State *L)
+{
+ const char *plain;
+ char *encoded;
+ size_t plain_len, encoded_len;
+ request_rec *r;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ plain = lua_tolstring(L, 2, &plain_len);
+ encoded_len = apr_base64_encode_len(plain_len);
+ if (encoded_len) {
+ encoded = apr_palloc(r->pool, encoded_len);
+ encoded_len = apr_base64_encode(encoded, plain, plain_len);
+ if (encoded_len > 0 && encoded[encoded_len - 1] == '\0')
+ encoded_len--;
+ lua_pushlstring(L, encoded, encoded_len);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * lua_apr_b64decode; r:base64_decode(string) - Decodes a Base64-encoded string
+ */
+static int lua_apr_b64decode(lua_State *L)
+{
+ const char *encoded;
+ char *plain;
+ size_t encoded_len, decoded_len;
+ request_rec *r;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ encoded = lua_tolstring(L, 2, &encoded_len);
+ decoded_len = apr_base64_decode_len(encoded);
+ if (decoded_len) {
+ plain = apr_palloc(r->pool, decoded_len);
+ decoded_len = apr_base64_decode(plain, encoded);
+ if (decoded_len > 0 && plain[decoded_len - 1] == '\0')
+ decoded_len--;
+ lua_pushlstring(L, plain, decoded_len);
+ return 1;
+ }
+ return 0;
+}
+
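+/*
+ * Usage sketch for the two Base64 helpers above (illustrative Lua):
+ *
+ *   local enc = r:base64_encode("Hello")  -- "SGVsbG8="
+ *   local dec = r:base64_decode(enc)      -- "Hello"
+ */
+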
+/*
+ * lua_ap_unescape; r:unescape(string) - Unescapes a URL-encoded string
+ */
+static int lua_ap_unescape(lua_State *L)
+{
+ const char *escaped;
+ char *plain;
+ size_t x,
+ y;
+ request_rec *r;
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ escaped = lua_tolstring(L, 2, &x);
+ plain = apr_pstrdup(r->pool, escaped);
+ y = ap_unescape_urlencoded(plain);
+ if (!y) {
+ lua_pushstring(L, plain);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * lua_ap_escape; r:escape(string) - URL-escapes a string
+ */
+static int lua_ap_escape(lua_State *L)
+{
+ const char *plain;
+ char *escaped;
+ size_t x;
+ request_rec *r;
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ plain = lua_tolstring(L, 2, &x);
+ escaped = ap_escape_urlencoded(r->pool, plain);
+ lua_pushstring(L, escaped);
+ return 1;
+}
+
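+/*
+ * Usage sketch for escape/unescape (illustrative Lua):
+ *
+ *   local esc = r:escape("key=a value")  -- percent-encodes reserved characters
+ *   local raw = r:unescape(esc)          -- back to "key=a value"
+ */
+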
+/*
+ * lua_apr_md5; r:md5(string) - Calculates an MD5 digest of a string
+ */
+static int lua_apr_md5(lua_State *L)
+{
+ const char *buffer;
+ char *result;
+ size_t len;
+ request_rec *r;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ buffer = lua_tolstring(L, 2, &len);
+ result = ap_md5_binary(r->pool, (const unsigned char *)buffer, len);
+ lua_pushstring(L, result);
+ return 1;
+}
+
+/*
+ * lua_apr_sha1; r:sha1(string) - Calculates the SHA1 digest of a string
+ */
+static int lua_apr_sha1(lua_State *L)
+{
+ unsigned char digest[APR_SHA1_DIGESTSIZE];
+ apr_sha1_ctx_t sha1;
+ const char *buffer;
+ char *result;
+ size_t len;
+ request_rec *r;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ result = apr_pcalloc(r->pool, sizeof(digest) * 2 + 1);
+ buffer = lua_tolstring(L, 2, &len);
+ apr_sha1_init(&sha1);
+ apr_sha1_update(&sha1, buffer, len);
+ apr_sha1_final(digest, &sha1);
+ ap_bin2hex(digest, sizeof(digest), result);
+ lua_pushstring(L, result);
+ return 1;
+}
+
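+/*
+ * Usage sketch for the digest helpers above (illustrative Lua):
+ *
+ *   local sum  = r:md5("some data")   -- 32-character hex digest
+ *   local hash = r:sha1("some data")  -- 40-character hex digest
+ */
+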
+/*
+ * lua_apr_htpassword; r:htpassword(string [, algorithm [, cost]]) - Creates
+ * a htpassword hash from a string
+ */
+static int lua_apr_htpassword(lua_State *L)
+{
+ passwd_ctx ctx = { 0 };
+ request_rec *r;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ ctx.passwd = apr_pstrdup(r->pool, lua_tostring(L, 2));
+ ctx.alg = luaL_optinteger(L, 3, ALG_APMD5);
+ ctx.cost = luaL_optinteger(L, 4, 0);
+ ctx.pool = r->pool;
+ ctx.out = apr_pcalloc(r->pool, MAX_PASSWD_LEN);
+ ctx.out_len = MAX_PASSWD_LEN;
+ if (mk_password_hash(&ctx)) {
+ lua_pushboolean(L, 0);
+ lua_pushstring(L, ctx.errstr);
+ return 2;
+ } else {
+ lua_pushstring(L, ctx.out);
+ }
+ return 1;
+}
+
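+/*
+ * Usage sketch (illustrative Lua; "secret" is a placeholder password):
+ *
+ *   local hash, err = r:htpassword("secret")  -- default algorithm (APR MD5)
+ *   if not hash then r:puts(err) end
+ */
+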
+/*
+ * lua_apr_touch; r:touch(string [, time]) - Sets mtime of a file
+ */
+static int lua_apr_touch(lua_State *L)
+{
+ request_rec *r;
+ const char *path;
+ apr_status_t status;
+ apr_time_t mtime;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ path = lua_tostring(L, 2);
+ mtime = luaL_optnumber(L, 3, apr_time_now());
+ status = apr_file_mtime_set(path, mtime, r->pool);
+ lua_pushboolean(L, (status == 0));
+ return 1;
+}
+
+/*
+ * lua_apr_mkdir; r:mkdir(string [, permissions]) - Creates a directory
+ */
+static int lua_apr_mkdir(lua_State *L)
+{
+ request_rec *r;
+ const char *path;
+ apr_status_t status;
+ apr_fileperms_t perms;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ path = lua_tostring(L, 2);
+ perms = luaL_optinteger(L, 3, APR_OS_DEFAULT);
+ status = apr_dir_make(path, perms, r->pool);
+ lua_pushboolean(L, (status == 0));
+ return 1;
+}
+
+/*
+ * lua_apr_mkrdir; r:mkrdir(string [, permissions]) - Creates directories
+ * recursive
+ */
+static int lua_apr_mkrdir(lua_State *L)
+{
+ request_rec *r;
+ const char *path;
+ apr_status_t status;
+ apr_fileperms_t perms;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ path = lua_tostring(L, 2);
+ perms = luaL_optinteger(L, 3, APR_OS_DEFAULT);
+ status = apr_dir_make_recursive(path, perms, r->pool);
+ lua_pushboolean(L, (status == 0));
+ return 1;
+}
+
+/*
+ * lua_apr_rmdir; r:rmdir(string) - Removes a directory
+ */
+static int lua_apr_rmdir(lua_State *L)
+{
+ request_rec *r;
+ const char *path;
+ apr_status_t status;
+
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ path = lua_tostring(L, 2);
+ status = apr_dir_remove(path, r->pool);
+ lua_pushboolean(L, (status == 0));
+ return 1;
+}
+
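+/*
+ * Usage sketch for the file/directory helpers above (illustrative Lua paths):
+ *
+ *   r:mkrdir("/tmp/a/b/c")  -- create nested directories
+ *   r:touch(r.filename)     -- bump the mtime of an existing file
+ *   r:rmdir("/tmp/a/b/c")   -- remove one (empty) directory
+ *
+ * Each call returns true on success and false otherwise.
+ */
+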
+/*
+ * lua_apr_date_parse_rfc; r.date_parse_rfc(string) - Parses a DateTime string
+ */
+static int lua_apr_date_parse_rfc(lua_State *L)
+{
+ const char *input;
+ apr_time_t result;
+
+ luaL_checktype(L, 1, LUA_TSTRING);
+ input = lua_tostring(L, 1);
+ result = apr_date_parse_rfc(input);
+ if (result == 0)
+ return 0;
+ lua_pushnumber(L, (lua_Number)(result / APR_USEC_PER_SEC));
+ return 1;
+}
+
+/*
+ * lua_ap_mpm_query; r:mpm_query(info) - Queries for MPM info
+ */
+static int lua_ap_mpm_query(lua_State *L)
+{
+ int x,
+ y;
+
+ x = lua_tointeger(L, 1);
+ ap_mpm_query(x, &y);
+ lua_pushinteger(L, y);
+ return 1;
+}
+
+/*
+ * lua_ap_expr; r:expr(string) - Evaluates an expr statement.
+ */
+static int lua_ap_expr(lua_State *L)
+{
+ request_rec *r;
+ int x = 0;
+ const char *expr,
+ *err;
+ ap_expr_info_t res;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ r = ap_lua_check_request_rec(L, 1);
+ expr = lua_tostring(L, 2);
+
+
+ res.filename = NULL;
+ res.flags = 0;
+ res.line_number = 0;
+ res.module_index = APLOG_MODULE_INDEX;
+
+ err = ap_expr_parse(r->pool, r->pool, &res, expr, NULL);
+ if (!err) {
+ x = ap_expr_exec(r, &res, &err);
+ lua_pushboolean(L, x);
+ if (x < 0) {
+ lua_pushstring(L, err);
+ return 2;
+ }
+ return 1;
+ } else {
+ lua_pushboolean(L, 0);
+ lua_pushstring(L, err);
+ return 2;
+ }
+ lua_pushboolean(L, 0);
+ return 1;
+}
+
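+/*
+ * Usage sketch (illustrative Lua):
+ *
+ *   if r:expr("%{HTTP_USER_AGENT} =~ /Chrome/") then
+ *       r:puts("probably Chrome")
+ *   end
+ *
+ * On parse errors the call returns false plus the error string.
+ */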
+
+/*
+ * lua_ap_regex; r:regex(string, pattern [, flags])
+ * - Evaluates a regex and returns captures if matched
+ */
+static int lua_ap_regex(lua_State *L)
+{
+ request_rec *r;
+ int i,
+ rv,
+ flags;
+ const char *pattern,
+ *source;
+ char *err;
+ ap_regex_t regex;
+ ap_regmatch_t matches[MODLUA_MAX_REG_MATCH+1];
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ luaL_checktype(L, 3, LUA_TSTRING);
+ r = ap_lua_check_request_rec(L, 1);
+ source = lua_tostring(L, 2);
+ pattern = lua_tostring(L, 3);
+ flags = luaL_optinteger(L, 4, 0);
+
+ rv = ap_regcomp(&regex, pattern, flags);
+ if (rv) {
+ lua_pushboolean(L, 0);
+ err = apr_palloc(r->pool, 256);
+ ap_regerror(rv, &regex, err, 256);
+ lua_pushstring(L, err);
+ return 2;
+ }
+
+ if (regex.re_nsub > MODLUA_MAX_REG_MATCH) {
+ lua_pushboolean(L, 0);
+ err = apr_palloc(r->pool, 64);
+ apr_snprintf(err, 64,
+ "regcomp found %d matches; only %d allowed.",
+ regex.re_nsub, MODLUA_MAX_REG_MATCH);
+ lua_pushstring(L, err);
+ return 2;
+ }
+
+ rv = ap_regexec(&regex, source, MODLUA_MAX_REG_MATCH, matches, 0);
+ if (rv == AP_REG_NOMATCH) {
+ lua_pushboolean(L, 0);
+ return 1;
+ }
+
+ lua_newtable(L);
+ for (i = 0; i <= regex.re_nsub; i++) {
+ lua_pushinteger(L, i);
+ if (matches[i].rm_so >= 0 && matches[i].rm_eo >= 0)
+ lua_pushstring(L,
+ apr_pstrndup(r->pool, source + matches[i].rm_so,
+ matches[i].rm_eo - matches[i].rm_so));
+ else
+ lua_pushnil(L);
+ lua_settable(L, -3);
+
+ }
+ return 1;
+}
+
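+/*
+ * Usage sketch (illustrative Lua):
+ *
+ *   local m = r:regex("mod_lua is cool", [[(\w+) is (\w+)]])
+ *   if m then r:puts(m[2]) end  -- prints "cool"; m[0] holds the full match
+ */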
+
+
+
+/*
+ * lua_ap_scoreboard_process; r:scoreboard_process(proc) - Returns scoreboard info
+ */
+static int lua_ap_scoreboard_process(lua_State *L)
+{
+ int i;
+ process_score *ps_record;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TNUMBER);
+ i = lua_tointeger(L, 2);
+ ps_record = ap_get_scoreboard_process(i);
+ if (ps_record) {
+ lua_newtable(L);
+
+ lua_pushstring(L, "connections");
+ lua_pushnumber(L, ps_record->connections);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "keepalive");
+ lua_pushnumber(L, ps_record->keep_alive);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "lingering_close");
+ lua_pushnumber(L, ps_record->lingering_close);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "pid");
+ lua_pushnumber(L, ps_record->pid);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "suspended");
+ lua_pushnumber(L, ps_record->suspended);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "write_completion");
+ lua_pushnumber(L, ps_record->write_completion);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "not_accepting");
+ lua_pushnumber(L, ps_record->not_accepting);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "quiescing");
+ lua_pushnumber(L, ps_record->quiescing);
+ lua_settable(L, -3);
+
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * lua_ap_scoreboard_worker; r:scoreboard_worker(proc, thread) - Returns thread
+ * info
+ */
+static int lua_ap_scoreboard_worker(lua_State *L)
+{
+ int i,
+ j;
+ worker_score *ws_record;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TNUMBER);
+ luaL_checktype(L, 3, LUA_TNUMBER);
+ i = lua_tointeger(L, 2);
+ j = lua_tointeger(L, 3);
+ ws_record = ap_get_scoreboard_worker_from_indexes(i, j);
+ if (ws_record) {
+ lua_newtable(L);
+
+ lua_pushstring(L, "access_count");
+ lua_pushnumber(L, ws_record->access_count);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "bytes_served");
+ lua_pushnumber(L, (lua_Number) ws_record->bytes_served);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "client");
+ lua_pushstring(L, ws_record->client);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "conn_bytes");
+ lua_pushnumber(L, (lua_Number) ws_record->conn_bytes);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "conn_count");
+ lua_pushnumber(L, ws_record->conn_count);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "generation");
+ lua_pushnumber(L, ws_record->generation);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "last_used");
+ lua_pushnumber(L, (lua_Number) ws_record->last_used);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "pid");
+ lua_pushnumber(L, ws_record->pid);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "request");
+ lua_pushstring(L, ws_record->request);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "start_time");
+ lua_pushnumber(L, (lua_Number) ws_record->start_time);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "status");
+ lua_pushnumber(L, ws_record->status);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "stop_time");
+ lua_pushnumber(L, (lua_Number) ws_record->stop_time);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "tid");
+
+ lua_pushinteger(L, (lua_Integer) ws_record->tid);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "vhost");
+ lua_pushstring(L, ws_record->vhost);
+ lua_settable(L, -3);
+#ifdef HAVE_TIMES
+ lua_pushstring(L, "stimes");
+ lua_pushnumber(L, ws_record->times.tms_stime);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "utimes");
+ lua_pushnumber(L, ws_record->times.tms_utime);
+ lua_settable(L, -3);
+#endif
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * lua_ap_clock; r:clock() - Returns a timestamp with microsecond precision
+ */
+static int lua_ap_clock(lua_State *L)
+{
+ apr_time_t now;
+ now = apr_time_now();
+ lua_pushnumber(L, (lua_Number) now);
+ return 1;
+}
+
+/*
+ * lua_ap_add_input_filter; r:add_input_filter(name) - Adds an input filter to
+ * the chain
+ */
+static int lua_ap_add_input_filter(lua_State *L)
+{
+ request_rec *r;
+ const char *filterName;
+ ap_filter_rec_t *filter;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ r = ap_lua_check_request_rec(L, 1);
+ filterName = lua_tostring(L, 2);
+ filter = ap_get_input_filter_handle(filterName);
+ if (filter) {
+ ap_add_input_filter_handle(filter, NULL, r, r->connection);
+ lua_pushboolean(L, 1);
+ } else
+ lua_pushboolean(L, 0);
+ return 1;
+}
+
+
+/*
+ * lua_ap_module_info; r:module_info(mod_name) - Returns information about a
+ * loaded module
+ */
+static int lua_ap_module_info(lua_State *L)
+{
+ const char *moduleName;
+ module *mod;
+
+ luaL_checktype(L, 1, LUA_TSTRING);
+ moduleName = lua_tostring(L, 1);
+ mod = ap_find_linked_module(moduleName);
+ if (mod && mod->cmds) {
+ const command_rec *cmd;
+ lua_newtable(L);
+ lua_pushstring(L, "commands");
+ lua_newtable(L);
+ for (cmd = mod->cmds; cmd->name; ++cmd) {
+ lua_pushstring(L, cmd->name);
+ lua_pushstring(L, cmd->errmsg);
+ lua_settable(L, -3);
+ }
+ lua_settable(L, -3);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * lua_ap_runtime_dir_relative: r:runtime_dir_relative(file): Returns the
+ * filename relative to the runtime dir
+ */
+static int lua_ap_runtime_dir_relative(lua_State *L)
+{
+ request_rec *r;
+ const char *file;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ file = luaL_optstring(L, 2, ".");
+ lua_pushstring(L, ap_runtime_dir_relative(r->pool, file));
+ return 1;
+}
+
+/*
+ * lua_ap_set_document_root; r:set_document_root(path) - sets the current doc
+ * root for the request
+ */
+static int lua_ap_set_document_root(lua_State *L)
+{
+ request_rec *r;
+ const char *root;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ r = ap_lua_check_request_rec(L, 1);
+ root = lua_tostring(L, 2);
+ ap_set_document_root(r, root);
+ return 0;
+}
+
+/*
+ * lua_ap_getdir; r:get_direntries(directory) - Gets all entries of a
+ * directory and returns the directory info as a table
+ */
+static int lua_ap_getdir(lua_State *L)
+{
+ request_rec *r;
+ apr_dir_t *thedir;
+ apr_finfo_t file_info;
+ apr_status_t status;
+ const char *directory;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ r = ap_lua_check_request_rec(L, 1);
+ directory = lua_tostring(L, 2);
+ if (apr_dir_open(&thedir, directory, r->pool) == APR_SUCCESS) {
+ int i = 0;
+ lua_newtable(L);
+ do {
+ status = apr_dir_read(&file_info, APR_FINFO_NAME, thedir);
+ if (APR_STATUS_IS_INCOMPLETE(status)) {
+ continue; /* ignore un-stat()able files */
+ }
+ else if (status != APR_SUCCESS) {
+ break;
+ }
+ lua_pushinteger(L, ++i);
+ lua_pushstring(L, file_info.name);
+ lua_settable(L, -3);
+
+ } while (1);
+ apr_dir_close(thedir);
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+/*
+ * lua_ap_stat; r:stat(filename [, wanted]) - Runs stat on a file and
+ * returns the file info as a table
+ */
+static int lua_ap_stat(lua_State *L)
+{
+ request_rec *r;
+ const char *filename;
+ apr_finfo_t file_info;
+ apr_int32_t wanted;
+
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ r = ap_lua_check_request_rec(L, 1);
+ filename = lua_tostring(L, 2);
+ wanted = luaL_optinteger(L, 3, APR_FINFO_MIN);
+ if (apr_stat(&file_info, filename, wanted, r->pool) == OK) {
+ lua_newtable(L);
+ if (wanted & APR_FINFO_MTIME) {
+ lua_pushstring(L, "mtime");
+ lua_pushnumber(L, (lua_Number) file_info.mtime);
+ lua_settable(L, -3);
+ }
+ if (wanted & APR_FINFO_ATIME) {
+ lua_pushstring(L, "atime");
+ lua_pushnumber(L, (lua_Number) file_info.atime);
+ lua_settable(L, -3);
+ }
+ if (wanted & APR_FINFO_CTIME) {
+ lua_pushstring(L, "ctime");
+ lua_pushnumber(L, (lua_Number) file_info.ctime);
+ lua_settable(L, -3);
+ }
+ if (wanted & APR_FINFO_SIZE) {
+ lua_pushstring(L, "size");
+ lua_pushnumber(L, (lua_Number) file_info.size);
+ lua_settable(L, -3);
+ }
+ if (wanted & APR_FINFO_TYPE) {
+ lua_pushstring(L, "filetype");
+ lua_pushinteger(L, file_info.filetype);
+ lua_settable(L, -3);
+ }
+ if (wanted & APR_FINFO_PROT) {
+ lua_pushstring(L, "protection");
+ lua_pushinteger(L, file_info.protection);
+ lua_settable(L, -3);
+ }
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
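+/*
+ * Usage sketch (illustrative Lua):
+ *
+ *   local info = r:stat(r.filename)  -- APR_FINFO_MIN fields by default
+ *   if info then r:puts("size: " .. info.size .. ", mtime: " .. info.mtime) end
+ *
+ * Times are apr_time_t values, i.e. microseconds since the epoch.
+ */
+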
+/*
+ * lua_ap_loaded_modules; r:loaded_modules() - Returns a list of loaded modules
+ */
+static int lua_ap_loaded_modules(lua_State *L)
+{
+ int i;
+ lua_newtable(L);
+ for (i = 0; ap_loaded_modules[i] && ap_loaded_modules[i]->name; i++) {
+ lua_pushinteger(L, i + 1);
+ lua_pushstring(L, ap_loaded_modules[i]->name);
+ lua_settable(L, -3);
+ }
+ return 1;
+}
+
+/*
+ * lua_ap_server_info; r:server_info() - Returns server info, such as the
+ * executable filename, server root, MPM, etc.
+ */
+static int lua_ap_server_info(lua_State *L)
+{
+ lua_newtable(L);
+
+ lua_pushstring(L, "server_executable");
+ lua_pushstring(L, ap_server_argv0);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "server_root");
+ lua_pushstring(L, ap_server_root);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "scoreboard_fname");
+ lua_pushstring(L, ap_scoreboard_fname);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "server_mpm");
+ lua_pushstring(L, ap_show_mpm());
+ lua_settable(L, -3);
+
+ return 1;
+}
+
+
+/*
+ * === Auto-scraped functions ===
+ */
+
+
+/**
+ * ap_set_context_info: Set context_prefix and context_document_root.
+ * @param r The request
+ * @param prefix the URI prefix, without trailing slash
+ * @param document_root the corresponding directory on disk, without trailing
+ * slash
+ * @note If either prefix or document_root is NULL, the corresponding
+ * property will not be changed.
+ */
+static int lua_ap_set_context_info(lua_State *L)
+{
+ request_rec *r;
+ const char *prefix;
+ const char *document_root;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ prefix = lua_tostring(L, 2);
+ luaL_checktype(L, 3, LUA_TSTRING);
+ document_root = lua_tostring(L, 3);
+ ap_set_context_info(r, prefix, document_root);
+ return 0;
+}
+
+
+/**
+ * ap_os_escape_path (apr_pool_t *p, const char *path, int partial)
+ * Convert an OS path to a URL in an OS-dependent way.
+ * @param p The pool to allocate from
+ * @param path The path to convert
+ * @param partial if set, assume that the path will be appended to something
+ * with a '/' in it (and thus does not prefix "./")
+ * @return The converted URL
+ */
+static int lua_ap_os_escape_path(lua_State *L)
+{
+ char *returnValue;
+ request_rec *r;
+ const char *path;
+ int partial = 0;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ path = lua_tostring(L, 2);
+ if (lua_isboolean(L, 3))
+ partial = lua_toboolean(L, 3);
+ returnValue = ap_os_escape_path(r->pool, path, partial);
+ lua_pushstring(L, returnValue);
+ return 1;
+}
+
+
+/**
+ * ap_escape_logitem (apr_pool_t *p, const char *str)
+ * Escape a string for logging
+ * @param p The pool to allocate from
+ * @param str The string to escape
+ * @return The escaped string
+ */
+static int lua_ap_escape_logitem(lua_State *L)
+{
+ char *returnValue;
+ request_rec *r;
+ const char *str;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ str = lua_tostring(L, 2);
+ returnValue = ap_escape_logitem(r->pool, str);
+ lua_pushstring(L, returnValue);
+ return 1;
+}
+
+/**
+ * ap_strcmp_match (const char *str, const char *expected)
+ * Determine if a string matches a pattern containing the wildcards '?' or '*'
+ * @param str The string to check
+ * @param expected The pattern to match against
+ * @param ignoreCase Whether to ignore case when matching
+ * @return 1 if the two strings match, 0 otherwise
+ */
+static int lua_ap_strcmp_match(lua_State *L)
+{
+ int returnValue;
+ const char *str;
+ const char *expected;
+ int ignoreCase = 0;
+ luaL_checktype(L, 1, LUA_TSTRING);
+ str = lua_tostring(L, 1);
+ luaL_checktype(L, 2, LUA_TSTRING);
+ expected = lua_tostring(L, 2);
+ if (lua_isboolean(L, 3))
+ ignoreCase = lua_toboolean(L, 3);
+ if (!ignoreCase)
+ returnValue = ap_strcmp_match(str, expected);
+ else
+ returnValue = ap_strcasecmp_match(str, expected);
+ lua_pushboolean(L, (!returnValue));
+ return 1;
+}
+
+
+/**
+ * ap_set_keepalive (request_rec *r)
+ * Set the keepalive status for this request
+ * @param r The current request
+ * @return 1 if keepalive can be set, 0 otherwise
+ */
+static int lua_ap_set_keepalive(lua_State *L)
+{
+ int returnValue;
+ request_rec *r;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ returnValue = ap_set_keepalive(r);
+ lua_pushboolean(L, returnValue);
+ return 1;
+}
+
+/**
+ * ap_make_etag (request_rec *r, int force_weak)
+ * Construct an entity tag from the resource information. If it's a real
+ * file, build in some of the file characteristics.
+ * @param r The current request
+ * @param force_weak Force the entity tag to be weak - it could be modified
+ * again in as short an interval.
+ * @return The entity tag
+ */
+static int lua_ap_make_etag(lua_State *L)
+{
+ char *returnValue;
+ request_rec *r;
+ int force_weak;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TBOOLEAN);
+ force_weak = luaL_optint(L, 2, 0);
+ returnValue = ap_make_etag(r, force_weak);
+ lua_pushstring(L, returnValue);
+ return 1;
+}
+
+
+
+/**
+ * ap_send_interim_response (request_rec *r, int send_headers)
+ * Send an interim (HTTP 1xx) response immediately.
+ * @param r The request
+ * @param send_headers Whether to send&clear headers in r->headers_out
+ */
+static int lua_ap_send_interim_response(lua_State *L)
+{
+ request_rec *r;
+ int send_headers = 0;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ if (lua_isboolean(L, 2))
+ send_headers = lua_toboolean(L, 2);
+ ap_send_interim_response(r, send_headers);
+ return 0;
+}
+
+
+/**
+ * ap_custom_response (request_rec *r, int status, const char *string)
+ * Install a custom response handler for a given status
+ * @param r The current request
+ * @param status The status for which the custom response should be used
+ * @param string The custom response. This can be a static string, a file
+ * or a URL
+ */
+static int lua_ap_custom_response(lua_State *L)
+{
+ request_rec *r;
+ int status;
+ const char *string;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ luaL_checktype(L, 2, LUA_TNUMBER);
+ status = lua_tointeger(L, 2);
+ luaL_checktype(L, 3, LUA_TSTRING);
+ string = lua_tostring(L, 3);
+ ap_custom_response(r, status, string);
+ return 0;
+}
+
+
+/**
+ * ap_exists_config_define (const char *name)
+ * Check for a definition from the server command line
+ * @param name The define to check for
+ * @return 1 if defined, 0 otherwise
+ */
+static int lua_ap_exists_config_define(lua_State *L)
+{
+ int returnValue;
+ const char *name;
+ luaL_checktype(L, 1, LUA_TSTRING);
+ name = lua_tostring(L, 1);
+ returnValue = ap_exists_config_define(name);
+ lua_pushboolean(L, returnValue);
+ return 1;
+}
+
+static int lua_ap_get_server_name_for_url(lua_State *L)
+{
+ const char *servername;
+ request_rec *r;
+ luaL_checktype(L, 1, LUA_TUSERDATA);
+ r = ap_lua_check_request_rec(L, 1);
+ servername = ap_get_server_name_for_url(r);
+ lua_pushstring(L, servername);
+ return 1;
+}
+
+/* ap_state_query (int query_code) - Queries the global server state */
+static int lua_ap_state_query(lua_State *L)
+{
+
+ int returnValue;
+ int query_code;
+ luaL_checktype(L, 1, LUA_TNUMBER);
+ query_code = lua_tointeger(L, 1);
+ returnValue = ap_state_query(query_code);
+ lua_pushinteger(L, returnValue);
+ return 1;
+}
+
+/*
+ * lua_ap_usleep; r:usleep(microseconds)
+ * - Sleep for the specified number of microseconds.
+ */
+static int lua_ap_usleep(lua_State *L)
+{
+    apr_interval_time_t usec;
+    luaL_checktype(L, 1, LUA_TNUMBER);
+    usec = (apr_interval_time_t)lua_tonumber(L, 1);
+    apr_sleep(usec);
+ return 0;
+}
+
+/* END dispatch methods for request_rec fields */
static int req_dispatch(lua_State *L)
{
@@ -463,7 +1757,7 @@ static int req_dispatch(lua_State *L)
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01489)
"request_rec->dispatching %s -> int", name);
rs = (*func) (r);
- lua_pushnumber(L, rs);
+ lua_pushinteger(L, rs);
return 1;
}
case APL_REQ_FUNTYPE_BOOLEAN:{
@@ -532,6 +1826,67 @@ static int req_debug(lua_State *L)
return req_log_at(L, APLOG_DEBUG);
}
+static int lua_ivm_get(lua_State *L)
+{
+ const char *key, *raw_key;
+ lua_ivm_object *object = NULL;
+ request_rec *r = ap_lua_check_request_rec(L, 1);
+ key = luaL_checkstring(L, 2);
+ raw_key = apr_pstrcat(r->pool, "lua_ivm_", key, NULL);
+ apr_thread_mutex_lock(lua_ivm_mutex);
+ apr_pool_userdata_get((void **)&object, raw_key, r->server->process->pool);
+ if (object) {
+ if (object->type == LUA_TBOOLEAN) lua_pushboolean(L, (int) object->number);
+ else if (object->type == LUA_TNUMBER) lua_pushnumber(L, object->number);
+ else if (object->type == LUA_TSTRING) lua_pushlstring(L, object->vb.buf, object->size);
+ apr_thread_mutex_unlock(lua_ivm_mutex);
+ return 1;
+ }
+ else {
+ apr_thread_mutex_unlock(lua_ivm_mutex);
+ return 0;
+ }
+}
+
+
+static int lua_ivm_set(lua_State *L)
+{
+ const char *key, *raw_key;
+ const char *value = NULL;
+ size_t str_len;
+ lua_ivm_object *object = NULL;
+ request_rec *r = ap_lua_check_request_rec(L, 1);
+ key = luaL_checkstring(L, 2);
+ luaL_checkany(L, 3);
+ raw_key = apr_pstrcat(r->pool, "lua_ivm_", key, NULL);
+
+ apr_thread_mutex_lock(lua_ivm_mutex);
+ apr_pool_userdata_get((void **)&object, raw_key, r->server->process->pool);
+ if (!object) {
+ object = apr_pcalloc(r->server->process->pool, sizeof(lua_ivm_object));
+ ap_varbuf_init(r->server->process->pool, &object->vb, 2);
+ object->size = 1;
+ object->vb_size = 1;
+ }
+ object->type = lua_type(L, 3);
+ if (object->type == LUA_TNUMBER) object->number = lua_tonumber(L, 3);
+ else if (object->type == LUA_TBOOLEAN) object->number = lua_tonumber(L, 3);
+ else if (object->type == LUA_TSTRING) {
+ value = lua_tolstring(L, 3, &str_len);
+ str_len++; /* add trailing \0 */
+ if ( str_len > object->vb_size) {
+ ap_varbuf_grow(&object->vb, str_len);
+ object->vb_size = str_len;
+ }
+ object->size = str_len-1;
+ memset(object->vb.buf, 0, str_len);
+ memcpy(object->vb.buf, value, str_len-1);
+ }
+ apr_pool_userdata_set(object, raw_key, NULL, r->server->process->pool);
+ apr_thread_mutex_unlock(lua_ivm_mutex);
+ return 0;
+}
+
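+/*
+ * Usage sketch for ivm_get/ivm_set (illustrative Lua; "counter" is a made-up
+ * key). Values live in the process pool, so they are shared by all requests
+ * served by this server process:
+ *
+ *   local hits = (r:ivm_get("counter") or 0) + 1
+ *   r:ivm_set("counter", hits)
+ */
+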
#define APLUA_REQ_TRACE(lev) static int req_trace##lev(lua_State *L) \
{ \
return req_log_at(L, APLOG_TRACE##lev); \
@@ -623,6 +1978,21 @@ static const struct luaL_Reg connection_methods[] = {
{NULL, NULL}
};
+static const char* lua_ap_auth_name(request_rec* r)
+{
+ const char *name;
+ name = ap_auth_name(r);
+ return name ? name : "";
+}
+
+static const char* lua_ap_get_server_name(request_rec* r)
+{
+ const char *name;
+ name = ap_get_server_name(r);
+ return name ? name : "localhost";
+}
+
+
static const struct luaL_Reg server_methods[] = {
{NULL, NULL}
@@ -637,7 +2007,7 @@ static req_fun_t *makefun(const void *fun, int type, apr_pool_t *pool)
return rft;
}
-AP_LUA_DECLARE(void) ap_lua_load_request_lmodule(lua_State *L, apr_pool_t *p)
+void ap_lua_load_request_lmodule(lua_State *L, apr_pool_t *p)
{
apr_hash_t *dispatch = apr_hash_make(p);
@@ -752,10 +2122,121 @@ AP_LUA_DECLARE(void) ap_lua_load_request_lmodule(lua_State *L, apr_pool_t *p)
makefun(&req_notes, APL_REQ_FUNTYPE_TABLE, p));
apr_hash_set(dispatch, "subprocess_env", APR_HASH_KEY_STRING,
makefun(&req_subprocess_env, APL_REQ_FUNTYPE_TABLE, p));
+ apr_hash_set(dispatch, "flush", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_rflush, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "port", APR_HASH_KEY_STRING,
+ makefun(&req_ap_get_server_port, APL_REQ_FUNTYPE_INT, p));
+ apr_hash_set(dispatch, "banner", APR_HASH_KEY_STRING,
+ makefun(&ap_get_server_banner, APL_REQ_FUNTYPE_STRING, p));
+ apr_hash_set(dispatch, "options", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_options, APL_REQ_FUNTYPE_STRING, p));
+ apr_hash_set(dispatch, "allowoverrides", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_allowoverrides, APL_REQ_FUNTYPE_STRING, p));
+ apr_hash_set(dispatch, "started", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_started, APL_REQ_FUNTYPE_INT, p));
+ apr_hash_set(dispatch, "basic_auth_pw", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_basic_auth_pw, APL_REQ_FUNTYPE_STRING, p));
+ apr_hash_set(dispatch, "limit_req_body", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_limit_req_body, APL_REQ_FUNTYPE_INT, p));
+ apr_hash_set(dispatch, "server_built", APR_HASH_KEY_STRING,
+ makefun(&ap_get_server_built, APL_REQ_FUNTYPE_STRING, p));
+ apr_hash_set(dispatch, "is_initial_req", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_is_initial_req, APL_REQ_FUNTYPE_BOOLEAN, p));
+ apr_hash_set(dispatch, "remaining", APR_HASH_KEY_STRING,
+ makefun(&req_remaining_field, APL_REQ_FUNTYPE_INT, p));
+ apr_hash_set(dispatch, "some_auth_required", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_some_auth_required, APL_REQ_FUNTYPE_BOOLEAN, p));
+ apr_hash_set(dispatch, "server_name", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_get_server_name, APL_REQ_FUNTYPE_STRING, p));
+ apr_hash_set(dispatch, "auth_name", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_auth_name, APL_REQ_FUNTYPE_STRING, p));
+ apr_hash_set(dispatch, "sendfile", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_sendfile, APL_REQ_FUNTYPE_LUACFUN, p));
apr_hash_set(dispatch, "dbacquire", APR_HASH_KEY_STRING,
makefun(&lua_db_acquire, APL_REQ_FUNTYPE_LUACFUN, p));
-
-
+ apr_hash_set(dispatch, "stat", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_stat, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "get_direntries", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_getdir, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "regex", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_regex, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "usleep", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_usleep, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "base64_encode", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_b64encode, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "base64_decode", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_b64decode, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "md5", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_md5, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "sha1", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_sha1, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "htpassword", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_htpassword, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "touch", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_touch, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "mkdir", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_mkdir, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "mkrdir", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_mkrdir, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "rmdir", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_rmdir, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "date_parse_rfc", APR_HASH_KEY_STRING,
+ makefun(&lua_apr_date_parse_rfc, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "escape", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_escape, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "unescape", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_unescape, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "mpm_query", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_mpm_query, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "expr", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_expr, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "scoreboard_process", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_scoreboard_process, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "scoreboard_worker", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_scoreboard_worker, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "clock", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_clock, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "requestbody", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_requestbody, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "add_input_filter", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_add_input_filter, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "module_info", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_module_info, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "loaded_modules", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_loaded_modules, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "runtime_dir_relative", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_runtime_dir_relative, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "server_info", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_server_info, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "set_document_root", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_set_document_root, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "set_context_info", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_set_context_info, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "os_escape_path", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_os_escape_path, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "escape_logitem", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_escape_logitem, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "strcmp_match", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_strcmp_match, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "set_keepalive", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_set_keepalive, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "make_etag", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_make_etag, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "send_interim_response", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_send_interim_response, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "custom_response", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_custom_response, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "exists_config_define", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_exists_config_define, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "state_query", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_state_query, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "get_server_name_for_url", APR_HASH_KEY_STRING,
+ makefun(&lua_ap_get_server_name_for_url, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "ivm_get", APR_HASH_KEY_STRING,
+ makefun(&lua_ivm_get, APL_REQ_FUNTYPE_LUACFUN, p));
+ apr_hash_set(dispatch, "ivm_set", APR_HASH_KEY_STRING,
+ makefun(&lua_ivm_set, APL_REQ_FUNTYPE_LUACFUN, p));
+
lua_pushlightuserdata(L, dispatch);
lua_setfield(L, LUA_REGISTRYINDEX, "Apache2.Request.dispatch");
@@ -785,7 +2266,7 @@ AP_LUA_DECLARE(void) ap_lua_load_request_lmodule(lua_State *L, apr_pool_t *p)
}
-AP_LUA_DECLARE(void) ap_lua_push_connection(lua_State *L, conn_rec *c)
+void ap_lua_push_connection(lua_State *L, conn_rec *c)
{
lua_boxpointer(L, c);
luaL_getmetatable(L, "Apache2.Connection");
@@ -802,7 +2283,7 @@ AP_LUA_DECLARE(void) ap_lua_push_connection(lua_State *L, conn_rec *c)
}
-AP_LUA_DECLARE(void) ap_lua_push_server(lua_State *L, server_rec *s)
+void ap_lua_push_server(lua_State *L, server_rec *s)
{
lua_boxpointer(L, s);
luaL_getmetatable(L, "Apache2.Server");
@@ -815,7 +2296,7 @@ AP_LUA_DECLARE(void) ap_lua_push_server(lua_State *L, server_rec *s)
lua_pop(L, 1);
}
-AP_LUA_DECLARE(void) ap_lua_push_request(lua_State *L, request_rec *r)
+void ap_lua_push_request(lua_State *L, request_rec *r)
{
lua_boxpointer(L, r);
luaL_getmetatable(L, "Apache2.Request");
diff --git a/modules/lua/lua_request.h b/modules/lua/lua_request.h
index ad272dc9..b5ed3e5f 100644
--- a/modules/lua/lua_request.h
+++ b/modules/lua/lua_request.h
@@ -15,15 +15,16 @@
* limitations under the License.
*/
-#include "mod_lua.h"
-
#ifndef _LUA_REQUEST_H_
#define _LUA_REQUEST_H_
-AP_LUA_DECLARE(void) ap_lua_load_request_lmodule(lua_State *L, apr_pool_t *p);
-AP_LUA_DECLARE(void) ap_lua_push_connection(lua_State *L, conn_rec *r);
-AP_LUA_DECLARE(void) ap_lua_push_server(lua_State *L, server_rec *r);
-AP_LUA_DECLARE(void) ap_lua_push_request(lua_State *L, request_rec *r);
+#include "mod_lua.h"
+#include "util_varbuf.h"
+
+void ap_lua_load_request_lmodule(lua_State *L, apr_pool_t *p);
+void ap_lua_push_connection(lua_State *L, conn_rec *r);
+void ap_lua_push_server(lua_State *L, server_rec *r);
+void ap_lua_push_request(lua_State *L, request_rec *r);
#define APL_REQ_FUNTYPE_STRING 1
#define APL_REQ_FUNTYPE_INT 2
@@ -37,5 +38,12 @@ typedef struct
int type;
} req_fun_t;
+typedef struct {
+ int type;
+ size_t size;
+ size_t vb_size;
+ lua_Number number;
+ struct ap_varbuf vb;
+} lua_ivm_object;
#endif /* !_LUA_REQUEST_H_ */
diff --git a/modules/lua/lua_vmprep.c b/modules/lua/lua_vmprep.c
index e821fee3..b0eb01c4 100644
--- a/modules/lua/lua_vmprep.c
+++ b/modules/lua/lua_vmprep.c
@@ -23,6 +23,27 @@
APLOG_USE_MODULE(lua);
+#ifndef AP_LUA_MODULE_EXT
+#if defined(NETWARE)
+#define AP_LUA_MODULE_EXT ".nlm"
+#elif defined(WIN32)
+#define AP_LUA_MODULE_EXT ".dll"
+#elif (defined(__hpux__) || defined(__hpux)) && !defined(__ia64)
+#define AP_LUA_MODULE_EXT ".sl"
+#else
+#define AP_LUA_MODULE_EXT ".so"
+#endif
+#endif
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_t *ap_lua_mutex;
+
+void ap_lua_init_mutex(apr_pool_t *pool, server_rec *s)
+{
+ apr_thread_mutex_create(&ap_lua_mutex, APR_THREAD_MUTEX_DEFAULT, pool);
+}
+#endif
+
/* forward dec'l from this file */
#if 0
@@ -99,7 +120,7 @@ static void pstack_dump(lua_State *L, apr_pool_t *r, int level,
#define makeintegerfield(L, n) lua_pushinteger(L, n); lua_setfield(L, -2, #n)
-AP_LUA_DECLARE(void) ap_lua_load_apache2_lmodule(lua_State *L)
+void ap_lua_load_apache2_lmodule(lua_State *L)
{
lua_getglobal(L, "package");
lua_getfield(L, -1, "loaded");
@@ -127,7 +148,7 @@ AP_LUA_DECLARE(void) ap_lua_load_apache2_lmodule(lua_State *L)
makeintegerfield(L, AUTHZ_NEUTRAL);
makeintegerfield(L, AUTHZ_GENERAL_ERROR);
makeintegerfield(L, AUTHZ_DENIED_NO_USER);
-
+
/*
makeintegerfield(L, HTTP_CONTINUE);
makeintegerfield(L, HTTP_SWITCHING_PROTOCOLS);
@@ -201,6 +222,16 @@ static apr_status_t cleanup_lua(void *l)
return APR_SUCCESS;
}
+static apr_status_t server_cleanup_lua(void *resource)
+{
+ ap_lua_server_spec* spec = (ap_lua_server_spec*) resource;
+ AP_DEBUG_ASSERT(spec != NULL);
+ if (spec->L != NULL) {
+ lua_close((lua_State *) spec->L);
+ }
+ return APR_SUCCESS;
+}
+
/*
munge_path(L,
"path",
@@ -295,8 +326,11 @@ static apr_status_t vm_construct(lua_State **vm, void *params, apr_pool_t *lifec
spec->file);
}
if (spec->package_cpaths) {
- munge_path(L, "cpath", "?.so", "./?.so", lifecycle_pool,
- spec->package_cpaths, spec->file);
+ munge_path(L,
+ "cpath", "?" AP_LUA_MODULE_EXT, "./?" AP_LUA_MODULE_EXT,
+ lifecycle_pool,
+ spec->package_cpaths,
+ spec->file);
}
if (spec->cb) {
@@ -333,33 +367,155 @@ static apr_status_t vm_construct(lua_State **vm, void *params, apr_pool_t *lifec
return APR_SUCCESS;
}
+static ap_lua_vm_spec* copy_vm_spec(apr_pool_t* pool, ap_lua_vm_spec* spec)
+{
+ ap_lua_vm_spec* copied_spec = apr_pcalloc(pool, sizeof(ap_lua_vm_spec));
+ copied_spec->bytecode_len = spec->bytecode_len;
+ copied_spec->bytecode = apr_pstrdup(pool, spec->bytecode);
+ copied_spec->cb = spec->cb;
+ copied_spec->cb_arg = NULL;
+ copied_spec->file = apr_pstrdup(pool, spec->file);
+ copied_spec->package_cpaths = apr_array_copy(pool, spec->package_cpaths);
+ copied_spec->package_paths = apr_array_copy(pool, spec->package_paths);
+ copied_spec->pool = pool;
+ copied_spec->scope = AP_LUA_SCOPE_SERVER;
+ copied_spec->codecache = spec->codecache;
+ return copied_spec;
+}
+
+static apr_status_t server_vm_construct(lua_State **resource, void *params, apr_pool_t *pool)
+{
+ lua_State* L;
+ ap_lua_server_spec* spec = apr_pcalloc(pool, sizeof(ap_lua_server_spec));
+ *resource = NULL;
+ if (vm_construct(&L, params, pool) == APR_SUCCESS) {
+ spec->finfo = apr_pcalloc(pool, sizeof(ap_lua_finfo));
+ if (L != NULL) {
+ spec->L = L;
+ *resource = (void*) spec;
+ lua_pushlightuserdata(L, spec);
+ lua_setfield(L, LUA_REGISTRYINDEX, "Apache2.Lua.server_spec");
+ return APR_SUCCESS;
+ }
+ }
+ return APR_EGENERAL;
+}
+
/**
* Function used to create a lua_State instance bound into the web
* server in the appropriate scope.
*/
-AP_LUA_DECLARE(lua_State*)ap_lua_get_lua_state(apr_pool_t *lifecycle_pool,
- ap_lua_vm_spec *spec)
+lua_State *ap_lua_get_lua_state(apr_pool_t *lifecycle_pool,
+ ap_lua_vm_spec *spec, request_rec* r)
{
lua_State *L = NULL;
-
- if (apr_pool_userdata_get((void **)&L, spec->file,
- lifecycle_pool) == APR_SUCCESS) {
-
- if(L==NULL) {
+ ap_lua_finfo *cache_info = NULL;
+ int tryCache = 0;
+
+ if (spec->scope == AP_LUA_SCOPE_SERVER) {
+ char *hash;
+ apr_reslist_t* reslist = NULL;
+ ap_lua_server_spec* sspec = NULL;
+ hash = apr_psprintf(r->pool, "reslist:%s", spec->file);
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(ap_lua_mutex);
+#endif
+ if (apr_pool_userdata_get((void **)&reslist, hash,
+ r->server->process->pool) == APR_SUCCESS) {
+ if (reslist != NULL) {
+ if (apr_reslist_acquire(reslist, (void**) &sspec) == APR_SUCCESS) {
+ L = sspec->L;
+ cache_info = sspec->finfo;
+ }
+ }
+ }
+ if (L == NULL) {
+ ap_lua_vm_spec* server_spec = copy_vm_spec(r->server->process->pool, spec);
+ if (
+ apr_reslist_create(&reslist, spec->vm_min, spec->vm_max, spec->vm_max, 0,
+ (apr_reslist_constructor) server_vm_construct,
+ (apr_reslist_destructor) server_cleanup_lua,
+ server_spec, r->server->process->pool)
+ == APR_SUCCESS && reslist != NULL) {
+ apr_pool_userdata_set(reslist, hash, NULL,
+ r->server->process->pool);
+ if (apr_reslist_acquire(reslist, (void**) &sspec) == APR_SUCCESS) {
+ L = sspec->L;
+ cache_info = sspec->finfo;
+ }
+ else {
+ return NULL;
+ }
+ }
+ }
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(ap_lua_mutex);
+#endif
+ }
+ else {
+ if (apr_pool_userdata_get((void **)&L, spec->file,
+ lifecycle_pool) != APR_SUCCESS) {
+ L = NULL;
+ }
+ }
+ if (L == NULL) {
ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, lifecycle_pool, APLOGNO(01483)
- "creating lua_State with file %s", spec->file);
+ "creating lua_State with file %s", spec->file);
/* not available, so create */
-
- if(!vm_construct(&L, spec, lifecycle_pool)) {
- AP_DEBUG_ASSERT(L != NULL);
- apr_pool_userdata_set(L,
- spec->file,
- cleanup_lua,
- lifecycle_pool);
+
+ if (!vm_construct(&L, spec, lifecycle_pool)) {
+ AP_DEBUG_ASSERT(L != NULL);
+ apr_pool_userdata_set(L, spec->file, cleanup_lua, lifecycle_pool);
+ }
+ }
+
+ if (spec->codecache == AP_LUA_CACHE_FOREVER || (spec->bytecode && spec->bytecode_len > 0)) {
+ tryCache = 1;
+ }
+ else {
+ char* mkey;
+ if (spec->scope != AP_LUA_SCOPE_SERVER) {
+ mkey = apr_psprintf(r->pool, "ap_lua_modified:%s", spec->file);
+ apr_pool_userdata_get((void **)&cache_info, mkey, lifecycle_pool);
+ if (cache_info == NULL) {
+ cache_info = apr_pcalloc(lifecycle_pool, sizeof(ap_lua_finfo));
+ apr_pool_userdata_set((void*) cache_info, mkey, NULL, lifecycle_pool);
+ }
+ }
+ if (spec->codecache == AP_LUA_CACHE_STAT) {
+ apr_finfo_t lua_finfo;
+ apr_stat(&lua_finfo, spec->file, APR_FINFO_MTIME|APR_FINFO_SIZE, lifecycle_pool);
+
+ /* On first visit, modified will be zero, but that's fine - The file is
+ loaded in the vm_construct function.
+ */
+ if ((cache_info->modified == lua_finfo.mtime && cache_info->size == lua_finfo.size)
+ || cache_info->modified == 0) {
+ tryCache = 1;
+ }
+ cache_info->modified = lua_finfo.mtime;
+ cache_info->size = lua_finfo.size;
+ }
+ else if (spec->codecache == AP_LUA_CACHE_NEVER) {
+ if (cache_info->runs == 0)
+ tryCache = 1;
+ }
+ cache_info->runs++;
+ }
+ if (tryCache == 0 && spec->scope != AP_LUA_SCOPE_ONCE) {
+ int rc;
+ ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, lifecycle_pool, APLOGNO(02332)
+ "(re)loading lua file %s", spec->file);
+ rc = luaL_loadfile(L, spec->file);
+ if (rc != 0) {
+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, lifecycle_pool, APLOGNO(02333)
+ "Error loading %s: %s", spec->file,
+                          rc == LUA_ERRMEM ? "memory allocation error"
+                                           : lua_tostring(L, -1));
+ return 0;
}
- }
+ lua_pcall(L, 0, LUA_MULTRET, 0);
}
- /*}*/
return L;
}
diff --git a/modules/lua/lua_vmprep.h b/modules/lua/lua_vmprep.h
index 1d3758ca..e46ac9b8 100644
--- a/modules/lua/lua_vmprep.h
+++ b/modules/lua/lua_vmprep.h
@@ -29,6 +29,7 @@
#include "apr_file_info.h"
#include "apr_time.h"
#include "apr_pools.h"
+#include "apr_reslist.h"
#ifndef VMPREP_H
@@ -39,11 +40,18 @@
#define AP_LUA_SCOPE_REQUEST 2
#define AP_LUA_SCOPE_CONN 3
#define AP_LUA_SCOPE_THREAD 4
+#define AP_LUA_SCOPE_SERVER 5
+#define AP_LUA_CACHE_UNSET 0
+#define AP_LUA_CACHE_NEVER 1
+#define AP_LUA_CACHE_STAT 2
+#define AP_LUA_CACHE_FOREVER 3
+
+#define AP_LUA_FILTER_INPUT 1
+#define AP_LUA_FILTER_OUTPUT 2
typedef void (*ap_lua_state_open_callback) (lua_State *L, apr_pool_t *p,
void *ctx);
-
/**
* Specification for a lua virtual machine
*/
@@ -56,8 +64,10 @@ typedef struct
/* name of base file to load in the vm */
const char *file;
- /* APL_SCOPE_ONCE | APL_SCOPE_REQUEST | APL_SCOPE_CONN | APL_SCOPE_THREAD */
+    /* AP_LUA_SCOPE_ONCE | AP_LUA_SCOPE_REQUEST | AP_LUA_SCOPE_CONN | AP_LUA_SCOPE_THREAD | AP_LUA_SCOPE_SERVER */
int scope;
+ unsigned int vm_min;
+ unsigned int vm_max;
ap_lua_state_open_callback cb;
void* cb_arg;
@@ -71,6 +81,8 @@ typedef struct
*/
const char *bytecode;
apr_size_t bytecode_len;
+
+ int codecache;
} ap_lua_vm_spec;
typedef struct
@@ -81,18 +93,32 @@ typedef struct
ap_regex_t *uri_pattern;
const char *bytecode;
apr_size_t bytecode_len;
+ int codecache;
} ap_lua_mapped_handler_spec;
-/* remove and make static once out of mod_wombat.c */
-AP_LUA_DECLARE(void) ap_lua_openlibs(lua_State *L);
+typedef struct
+{
+ const char *function_name;
+ const char *file_name;
+ const char* filter_name;
+ int direction; /* AP_LUA_FILTER_INPUT | AP_LUA_FILTER_OUTPUT */
+} ap_lua_filter_handler_spec;
-/* remove and make static once out of mod_wombat.c */
-AP_LUA_DECLARE(void) ap_lua_registerlib(lua_State *L, char *name, lua_CFunction f);
+typedef struct {
+ apr_size_t runs;
+ apr_time_t modified;
+ apr_off_t size;
+} ap_lua_finfo;
+
+typedef struct {
+ lua_State* L;
+ ap_lua_finfo* finfo;
+} ap_lua_server_spec;
/**
* Fake out addition of the "apache2" module
*/
-AP_LUA_DECLARE(void) ap_lua_load_apache2_lmodule(lua_State *L);
+void ap_lua_load_apache2_lmodule(lua_State *L);
/*
* alternate means of getting lua_State (preferred eventually)
@@ -106,9 +132,16 @@ AP_LUA_DECLARE(void) ap_lua_load_apache2_lmodule(lua_State *L);
* @cb callback for vm initialization called *before* the file is opened
* @ctx a baton passed to cb
*/
-AP_LUA_DECLARE(lua_State*) ap_lua_get_lua_state(apr_pool_t *lifecycle_pool,
- ap_lua_vm_spec *spec);
-
+lua_State *ap_lua_get_lua_state(apr_pool_t *lifecycle_pool,
+ ap_lua_vm_spec *spec, request_rec* r);
+#if APR_HAS_THREADS || defined(DOXYGEN)
+/*
+ * Initialize mod_lua mutex.
+ * @pool pool for mutex
+ * @s server_rec for logging
+ */
+void ap_lua_init_mutex(apr_pool_t *pool, server_rec *s);
+#endif
#endif
diff --git a/modules/lua/mod_lua.c b/modules/lua/mod_lua.c
index b5b626d1..7c35011e 100644
--- a/modules/lua/mod_lua.c
+++ b/modules/lua/mod_lua.c
@@ -19,6 +19,7 @@
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
+#include <apr_thread_mutex.h>
#include "lua_apr.h"
#include "lua_config.h"
@@ -55,6 +56,15 @@ typedef struct {
apr_hash_t *lua_authz_providers;
+typedef struct
+{
+ apr_bucket_brigade *tmpBucket;
+ lua_State *L;
+ ap_lua_vm_spec *spec;
+ int broken;
+} lua_filter_ctx;
+
+apr_thread_mutex_t* lua_ivm_mutex = NULL;
/**
* error reporting if lua has an error.
@@ -65,7 +75,6 @@ static void report_lua_error(lua_State *L, request_rec *r)
const char *lua_response;
r->status = HTTP_INTERNAL_SERVER_ERROR;
r->content_type = "text/html";
-
ap_rputs("<b>Error!</b>\n", r);
ap_rputs("<p>", r);
lua_response = lua_tostring(L, -1);
@@ -103,9 +112,31 @@ static const char *scope_to_string(unsigned int scope)
#if APR_HAS_THREADS
case AP_LUA_SCOPE_THREAD:
return "thread";
+ case AP_LUA_SCOPE_SERVER:
+ return "server";
#endif
default:
ap_assert(0);
+ return 0;
+ }
+}
+
+static void ap_lua_release_state(lua_State* L, ap_lua_vm_spec* spec, request_rec* r) {
+ char *hash;
+ apr_reslist_t* reslist = NULL;
+ if (spec->scope == AP_LUA_SCOPE_SERVER) {
+ ap_lua_server_spec* sspec = NULL;
+ lua_settop(L, 0);
+ lua_getfield(L, LUA_REGISTRYINDEX, "Apache2.Lua.server_spec");
+ sspec = (ap_lua_server_spec*) lua_touserdata(L, 1);
+ hash = apr_psprintf(r->pool, "reslist:%s", spec->file);
+ if (apr_pool_userdata_get((void **)&reslist, hash,
+ r->server->process->pool) == APR_SUCCESS) {
+ AP_DEBUG_ASSERT(sspec != NULL);
+ if (reslist != NULL) {
+ apr_reslist_release(reslist, sspec);
+ }
+ }
}
}
@@ -130,7 +161,10 @@ static ap_lua_vm_spec *create_vm_spec(apr_pool_t **lifecycle_pool,
spec->cb_arg = NULL;
spec->bytecode = bytecode;
spec->bytecode_len = bytecode_len;
-
+ spec->codecache = (cfg->codecache == AP_LUA_CACHE_UNSET) ? AP_LUA_CACHE_STAT : cfg->codecache;
+ spec->vm_min = cfg->vm_min ? cfg->vm_min : 1;
+ spec->vm_max = cfg->vm_max ? cfg->vm_max : 1;
+
if (filename) {
char *file;
apr_filepath_merge(&file, server_cfg->root_path,
@@ -160,6 +194,9 @@ static ap_lua_vm_spec *create_vm_spec(apr_pool_t **lifecycle_pool,
case AP_LUA_SCOPE_THREAD:
pool = apr_thread_pool_get(r->connection->current_thread);
break;
+ case AP_LUA_SCOPE_SERVER:
+ pool = r->server->process->pool;
+ break;
#endif
default:
ap_assert(0);
@@ -169,15 +206,58 @@ static ap_lua_vm_spec *create_vm_spec(apr_pool_t **lifecycle_pool,
return spec;
}
+static const char* ap_lua_interpolate_string(apr_pool_t* pool, const char* string, const char** values)
+{
+ char *stringBetween;
+ const char* ret;
+ int srclen,x,y;
+ srclen = strlen(string);
+ ret = "";
+ y = 0;
+ for (x=0; x < srclen; x++) {
+ if (string[x] == '$' && x != srclen-1 && string[x+1] >= '0' && string[x+1] <= '9') {
+ int v = *(string+x+1) - '0';
+ if (x-y > 0) {
+ stringBetween = apr_pstrndup(pool, string+y, x-y);
+ }
+ else {
+ stringBetween = "";
+ }
+ ret = apr_pstrcat(pool, ret, stringBetween, values[v], NULL);
+ y = ++x+1;
+ }
+ }
+
+ if (x-y > 0 && y > 0) {
+ stringBetween = apr_pstrndup(pool, string+y, x-y);
+ ret = apr_pstrcat(pool, ret, stringBetween, NULL);
+ }
+ /* If no replacement was made, just return the original string */
+ else if (y == 0) {
+ return string;
+ }
+ return ret;
+}
+
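+/* For example (illustrative values): interpolating "$0-$1" against the array
+ * { "foo", "bar" } yields "foo-bar"; "$" followed by a digit selects the
+ * corresponding entry of the values array, and strings without any "$N" are
+ * returned unchanged. */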
+
/**
* "main"
*/
static int lua_handler(request_rec *r)
{
+ int rc = OK;
if (strcmp(r->handler, "lua-script")) {
return DECLINED;
}
+ /* Decline the request if the script does not exist (or is a directory),
+ * rather than just returning internal server error */
+ if (
+ (r->finfo.filetype == APR_NOFILE)
+ || (r->finfo.filetype & APR_DIR)
+ ) {
+ return DECLINED;
+ }
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(01472)
"handling [%s] in mod_lua", r->filename);
@@ -190,11 +270,12 @@ static int lua_handler(request_rec *r)
ap_lua_vm_spec *spec = create_vm_spec(&pool, r, cfg, NULL, NULL, NULL,
0, "handle", "request handler");
- L = ap_lua_get_lua_state(pool, spec);
+ L = ap_lua_get_lua_state(pool, spec, r);
if (!L) {
/* TODO annotate spec with failure reason */
r->status = HTTP_INTERNAL_SERVER_ERROR;
ap_rputs("Unable to compile VM, see logs", r);
+ ap_lua_release_state(L, spec, r);
return HTTP_INTERNAL_SERVER_ERROR;
}
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, APLOGNO(01474) "got a vm!");
@@ -204,17 +285,309 @@ static int lua_handler(request_rec *r)
"lua: Unable to find function %s in %s",
"handle",
spec->file);
+ ap_lua_release_state(L, spec, r);
return HTTP_INTERNAL_SERVER_ERROR;
}
ap_lua_run_lua_request(L, r);
- if (lua_pcall(L, 1, 0, 0)) {
+ if (lua_pcall(L, 1, 1, 0)) {
report_lua_error(L, r);
}
+ if (lua_isnumber(L, -1)) {
+ rc = lua_tointeger(L, -1);
+ }
+ ap_lua_release_state(L, spec, r);
}
- return OK;
+ return rc;
}
+/* ------------------- Input/output content filters ------------------- */
+
+
+static apr_status_t lua_setup_filter_ctx(ap_filter_t* f, request_rec* r, lua_filter_ctx** c) {
+ apr_pool_t *pool;
+ ap_lua_vm_spec *spec;
+ int n, rc;
+ lua_State *L;
+ lua_filter_ctx *ctx;
+ ap_lua_server_cfg *server_cfg = ap_get_module_config(r->server->module_config,
+ &lua_module);
+ const ap_lua_dir_cfg *cfg = ap_get_module_config(r->per_dir_config,
+ &lua_module);
+
+ ctx = apr_pcalloc(r->pool, sizeof(lua_filter_ctx));
+ ctx->broken = 0;
+ *c = ctx;
+ /* Find the filter that was called */
+ for (n = 0; n < cfg->mapped_filters->nelts; n++) {
+ ap_lua_filter_handler_spec *hook_spec =
+ ((ap_lua_filter_handler_spec **) cfg->mapped_filters->elts)[n];
+
+ if (hook_spec == NULL) {
+ continue;
+ }
+ if (!strcasecmp(hook_spec->filter_name, f->frec->name)) {
+ spec = create_vm_spec(&pool, r, cfg, server_cfg,
+ hook_spec->file_name,
+ NULL,
+ 0,
+ hook_spec->function_name,
+ "filter");
+ L = ap_lua_get_lua_state(pool, spec, r);
+ if (L) {
+ L = lua_newthread(L);
+ }
+
+ if (!L) {
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02328)
+ "lua: Failed to obtain lua interpreter for %s %s",
+ hook_spec->function_name, hook_spec->file_name);
+ ap_lua_release_state(L, spec, r);
+ return APR_EGENERAL;
+ }
+ if (hook_spec->function_name != NULL) {
+ lua_getglobal(L, hook_spec->function_name);
+ if (!lua_isfunction(L, -1)) {
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02329)
+ "lua: Unable to find function %s in %s",
+ hook_spec->function_name,
+ hook_spec->file_name);
+ ap_lua_release_state(L, spec, r);
+ return APR_EGENERAL;
+ }
+
+ ap_lua_run_lua_request(L, r);
+ }
+ else {
+ int t;
+ ap_lua_run_lua_request(L, r);
+
+ t = lua_gettop(L);
+ lua_setglobal(L, "r");
+ lua_settop(L, t);
+ }
+ ctx->L = L;
+ ctx->spec = spec;
+
+ /* If a Lua filter is interested in filtering a request, it must first yield;
+ * otherwise we assume it is not interested and pretend we didn't find it.
+ */
+ rc = lua_resume(L, 1);
+ if (rc == LUA_YIELD) {
+ return OK;
+ }
+ else {
+ ap_lua_release_state(L, spec, r);
+ return APR_ENOENT;
+ }
+ }
+ }
+ return APR_ENOENT;
+}
+
+static apr_status_t lua_output_filter_handle(ap_filter_t *f, apr_bucket_brigade *pbbIn) {
+ request_rec *r = f->r;
+ int rc;
+ lua_State *L;
+ lua_filter_ctx* ctx;
+ conn_rec *c = r->connection;
+ apr_bucket *pbktIn;
+ apr_status_t rv;
+
+ /* Set up the initial filter context and acquire the function.
+ * The corresponding Lua function should yield here.
+ */
+ if (!f->ctx) {
+ rc = lua_setup_filter_ctx(f,r,&ctx);
+ if (rc == APR_EGENERAL) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if (rc == APR_ENOENT) {
+ /* No filter entry found (or the script declined to filter), just pass on the buckets */
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next,pbbIn);
+ }
+ f->ctx = ctx;
+ ctx->tmpBucket = apr_brigade_create(r->pool, c->bucket_alloc);
+ }
+ ctx = (lua_filter_ctx*) f->ctx;
+ L = ctx->L;
+ /* While the Lua function is still yielding, pass in buckets to the coroutine */
+ if (!ctx->broken) {
+ for (pbktIn = APR_BRIGADE_FIRST(pbbIn);
+ pbktIn != APR_BRIGADE_SENTINEL(pbbIn);
+ pbktIn = APR_BUCKET_NEXT(pbktIn))
+ {
+ const char *data;
+ apr_size_t len;
+ apr_bucket *pbktOut;
+
+ /* read the bucket */
+ apr_bucket_read(pbktIn,&data,&len,APR_BLOCK_READ);
+
+ /* Push the bucket onto the Lua stack as a global var */
+ lua_pushlstring(L, data, len);
+ lua_setglobal(L, "bucket");
+
+ /* If Lua yielded, it means we have something to pass on */
+ if (lua_resume(L, 0) == LUA_YIELD) {
+ size_t olen;
+ const char* output = lua_tolstring(L, 1, &olen);
+ pbktOut = apr_bucket_heap_create(output, olen, NULL,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->tmpBucket, pbktOut);
+ rv = ap_pass_brigade(f->next, ctx->tmpBucket);
+ apr_brigade_cleanup(ctx->tmpBucket);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ else {
+ ctx->broken = 1;
+ ap_lua_release_state(L, ctx->spec, r);
+ ap_remove_output_filter(f);
+ apr_brigade_cleanup(pbbIn);
+ apr_brigade_cleanup(ctx->tmpBucket);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ /* If we've safely reached the end, do a final call to Lua to allow for any
+ finishing moves by the script, such as appending a tail. */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pbbIn))) {
+ apr_bucket *pbktEOS;
+ lua_pushnil(L);
+ lua_setglobal(L, "bucket");
+ if (lua_resume(L, 0) == LUA_YIELD) {
+ apr_bucket *pbktOut;
+ size_t olen;
+ const char* output = lua_tolstring(L, 1, &olen);
+ pbktOut = apr_bucket_heap_create(output, olen, NULL,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->tmpBucket, pbktOut);
+ }
+ pbktEOS = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->tmpBucket, pbktEOS);
+ ap_lua_release_state(L, ctx->spec, r);
+ rv = ap_pass_brigade(f->next, ctx->tmpBucket);
+ apr_brigade_cleanup(ctx->tmpBucket);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ }
+ /* Clean up */
+ apr_brigade_cleanup(pbbIn);
+ return APR_SUCCESS;
+}
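Taken together, lua_setup_filter_ctx and the two filter handlers define a coroutine protocol for filter scripts: the function is resumed with the request object, must yield once to accept the job, is then resumed once per chunk with the data in the global 'bucket' (nil at end of stream), and every value it yields is passed on down the chain. A hedged sketch of an output filter obeying that protocol (not part of the patch; the function name is whatever was registered via LuaOutputFilter):

    function filter(r)
        -- First yield: signal that this request should be filtered at all.
        coroutine.yield()
        -- Each resume places one chunk of response data in the global 'bucket';
        -- whatever we yield here is sent down the filter chain.
        while bucket ~= nil do
            coroutine.yield(bucket:upper())
        end
        -- bucket == nil means end of stream; a final yield may append a tail.
        coroutine.yield("\n<!-- filtered by mod_lua -->\n")
    end

Input filters registered with LuaInputFilter follow the same pattern, except that 'bucket' carries request body data fetched from the next filter.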
+
+
+
+static apr_status_t lua_input_filter_handle(ap_filter_t *f,
+ apr_bucket_brigade *pbbOut,
+ ap_input_mode_t eMode,
+ apr_read_type_e eBlock,
+ apr_off_t nBytes)
+{
+ request_rec *r = f->r;
+ int rc, lastCall = 0;
+ lua_State *L;
+ lua_filter_ctx* ctx;
+ conn_rec *c = r->connection;
+ apr_status_t ret;
+
+ /* Set up the initial filter context and acquire the function.
+ * The corresponding Lua function should yield here.
+ */
+ if (!f->ctx) {
+ rc = lua_setup_filter_ctx(f,r,&ctx);
+ f->ctx = ctx;
+ if (rc == APR_EGENERAL) {
+ ctx->broken = 1;
+ ap_remove_input_filter(f);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ if (rc == APR_ENOENT ) {
+ ap_remove_input_filter(f);
+ ctx->broken = 1;
+ }
+ if (rc == APR_SUCCESS) {
+ ctx->tmpBucket = apr_brigade_create(r->pool, c->bucket_alloc);
+ }
+ }
+ ctx = (lua_filter_ctx*) f->ctx;
+ L = ctx->L;
+ /* If the Lua script broke or denied serving the request, just pass the buckets through */
+ if (ctx->broken) {
+ return ap_get_brigade(f->next, pbbOut, eMode, eBlock, nBytes);
+ }
+
+ if (APR_BRIGADE_EMPTY(ctx->tmpBucket)) {
+ ret = ap_get_brigade(f->next, ctx->tmpBucket, eMode, eBlock, nBytes);
+ if (eMode == AP_MODE_EATCRLF || ret != APR_SUCCESS)
+ return ret;
+ }
+
+ /* While the Lua function is still yielding, pass buckets to the coroutine */
+ if (!ctx->broken) {
+ lastCall = 0;
+ while(!APR_BRIGADE_EMPTY(ctx->tmpBucket)) {
+ apr_bucket *pbktIn = APR_BRIGADE_FIRST(ctx->tmpBucket);
+ apr_bucket *pbktOut;
+ const char *data;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(pbktIn)) {
+ APR_BUCKET_REMOVE(pbktIn);
+ break;
+ }
+
+ /* read the bucket */
+ ret = apr_bucket_read(pbktIn, &data, &len, eBlock);
+ if (ret != APR_SUCCESS)
+ return ret;
+
+ /* Push the bucket onto the Lua stack as a global var */
+ lastCall++;
+ lua_pushlstring(L, data, len);
+ lua_setglobal(L, "bucket");
+
+ /* If Lua yielded, it means we have something to pass on */
+ if (lua_resume(L, 0) == LUA_YIELD) {
+ size_t olen;
+ const char* output = lua_tolstring(L, 1, &olen);
+ pbktOut = apr_bucket_heap_create(output, olen, 0, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
+ apr_bucket_delete(pbktIn);
+ return APR_SUCCESS;
+ }
+ else {
+ ctx->broken = 1;
+ ap_lua_release_state(L, ctx->spec, r);
+ ap_remove_input_filter(f);
+ apr_bucket_delete(pbktIn);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ /* If we've safely reached the end, do a final call to Lua to allow for any
+ finishing moves by the script, such as appending a tail. */
+ if (lastCall == 0) {
+ apr_bucket *pbktEOS = apr_bucket_eos_create(c->bucket_alloc);
+ lua_pushnil(L);
+ lua_setglobal(L, "bucket");
+ if (lua_resume(L, 0) == LUA_YIELD) {
+ apr_bucket *pbktOut;
+ size_t olen;
+ const char* output = lua_tolstring(L, 1, &olen);
+ pbktOut = apr_bucket_heap_create(output, olen, 0, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
+ }
+ APR_BRIGADE_INSERT_TAIL(pbbOut,pbktEOS);
+ ap_lua_release_state(L, ctx->spec, r);
+ }
+ }
+ return APR_SUCCESS;
+}
+
/* ---------------- Configury stuff --------------- */
@@ -249,7 +622,7 @@ static int lua_request_rec_hook_harness(request_rec *r, const char *name, int ap
hook_spec->function_name,
"request hook");
- L = ap_lua_get_lua_state(pool, spec);
+ L = ap_lua_get_lua_state(pool, spec, r);
if (!L) {
ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(01477)
@@ -265,6 +638,7 @@ static int lua_request_rec_hook_harness(request_rec *r, const char *name, int ap
"lua: Unable to find function %s in %s",
hook_spec->function_name,
hook_spec->file_name);
+ ap_lua_release_state(L, spec, r);
return HTTP_INTERNAL_SERVER_ERROR;
}
@@ -281,6 +655,7 @@ static int lua_request_rec_hook_harness(request_rec *r, const char *name, int ap
if (lua_pcall(L, 1, 1, 0)) {
report_lua_error(L, r);
+ ap_lua_release_state(L, spec, r);
return HTTP_INTERNAL_SERVER_ERROR;
}
rc = DECLINED;
@@ -288,6 +663,102 @@ static int lua_request_rec_hook_harness(request_rec *r, const char *name, int ap
rc = lua_tointeger(L, -1);
}
if (rc != DECLINED) {
+ ap_lua_release_state(L, spec, r);
+ return rc;
+ }
+ ap_lua_release_state(L, spec, r);
+ }
+ }
+ return DECLINED;
+}
+
+
+static int lua_map_handler(request_rec *r)
+{
+ int rc, n = 0;
+ apr_pool_t *pool;
+ lua_State *L;
+ const char *filename, *function_name;
+ const char *values[10];
+ ap_lua_vm_spec *spec;
+ ap_regmatch_t match[10];
+ ap_lua_server_cfg *server_cfg = ap_get_module_config(r->server->module_config,
+ &lua_module);
+ const ap_lua_dir_cfg *cfg = ap_get_module_config(r->per_dir_config,
+ &lua_module);
+ for (n = 0; n < cfg->mapped_handlers->nelts; n++) {
+ ap_lua_mapped_handler_spec *hook_spec =
+ ((ap_lua_mapped_handler_spec **) cfg->mapped_handlers->elts)[n];
+
+ if (hook_spec == NULL) {
+ continue;
+ }
+ if (!ap_regexec(hook_spec->uri_pattern, r->uri, 10, match, 0)) {
+ int i;
+ for (i=0 ; i < 10; i++) {
+ if (match[i].rm_eo >= 0) {
+ values[i] = apr_pstrndup(r->pool, r->uri+match[i].rm_so, match[i].rm_eo - match[i].rm_so);
+ }
+ else values[i] = "";
+ }
+ filename = ap_lua_interpolate_string(r->pool, hook_spec->file_name, values);
+ function_name = ap_lua_interpolate_string(r->pool, hook_spec->function_name, values);
+ spec = create_vm_spec(&pool, r, cfg, server_cfg,
+ filename,
+ hook_spec->bytecode,
+ hook_spec->bytecode_len,
+ function_name,
+ "mapped handler");
+ L = ap_lua_get_lua_state(pool, spec, r);
+
+ if (!L) {
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02330)
+ "lua: Failed to obtain lua interpreter for %s %s",
+ function_name, filename);
+ ap_lua_release_state(L, spec, r);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ if (function_name != NULL) {
+ lua_getglobal(L, function_name);
+ if (!lua_isfunction(L, -1)) {
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02331)
+ "lua: Unable to find function %s in %s",
+ function_name,
+ filename);
+ ap_lua_release_state(L, spec, r);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ ap_lua_run_lua_request(L, r);
+ }
+ else {
+ int t;
+ ap_lua_run_lua_request(L, r);
+
+ t = lua_gettop(L);
+ lua_setglobal(L, "r");
+ lua_settop(L, t);
+ }
+
+ if (lua_pcall(L, 1, 1, 0)) {
+ report_lua_error(L, r);
+ ap_lua_release_state(L, spec, r);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ rc = DECLINED;
+ if (lua_isnumber(L, -1)) {
+ rc = lua_tointeger(L, -1);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02483)
+ "lua: Lua handler %s in %s did not return a value, assuming apache2.OK",
+ function_name,
+ filename);
+ rc = OK;
+ }
+ ap_lua_release_state(L, spec, r);
+ if (rc != DECLINED) {
return rc;
}
}
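lua_map_handler ties the new LuaMapHandler directive to ap_lua_interpolate_string: the directive's pattern is matched against r->uri, and the capture groups become $0..$9 in both the file name and the function name before the script is loaded. A hypothetical 'LuaMapHandler ^/photos/(\w+)$ /scripts/photos.lua handle_$1' would therefore run handle_album from /scripts/photos.lua for a request to /photos/album. A minimal sketch of such a mapped script (not part of the patch; all names are illustrative):

    -- /scripts/photos.lua (hypothetical)
    function handle_album(r)
        r.content_type = "text/html"
        r:puts("<p>album listing</p>\n")
        return apache2.OK
    end

As the warning logged under APLOGNO(02483) notes, a mapped handler that returns no value is treated as apache2.OK rather than DECLINED.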
@@ -564,7 +1035,55 @@ static const char *register_named_file_function_hook(const char *name,
*(ap_lua_mapped_handler_spec **) apr_array_push(hook_specs) = spec;
return NULL;
}
+static const char *register_mapped_file_function_hook(const char *pattern,
+ cmd_parms *cmd,
+ void *_cfg,
+ const char *file,
+ const char *function)
+{
+ ap_lua_mapped_handler_spec *spec;
+ ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg;
+ ap_regex_t *regex = apr_pcalloc(cmd->pool, sizeof(ap_regex_t));
+ if (ap_regcomp(regex, pattern,0)) {
+ return "Invalid regex pattern!";
+ }
+
+ spec = apr_pcalloc(cmd->pool, sizeof(ap_lua_mapped_handler_spec));
+ spec->file_name = apr_pstrdup(cmd->pool, file);
+ spec->function_name = apr_pstrdup(cmd->pool, function);
+ spec->scope = cfg->vm_scope;
+ spec->uri_pattern = regex;
+
+ *(ap_lua_mapped_handler_spec **) apr_array_push(cfg->mapped_handlers) = spec;
+ return NULL;
+}
+static const char *register_filter_function_hook(const char *filter,
+ cmd_parms *cmd,
+ void *_cfg,
+ const char *file,
+ const char *function,
+ int direction)
+{
+ ap_lua_filter_handler_spec *spec;
+ ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg;
+
+ spec = apr_pcalloc(cmd->pool, sizeof(ap_lua_filter_handler_spec));
+ spec->file_name = apr_pstrdup(cmd->pool, file);
+ spec->function_name = apr_pstrdup(cmd->pool, function);
+ spec->filter_name = filter;
+ *(ap_lua_filter_handler_spec **) apr_array_push(cfg->mapped_filters) = spec;
+ /* TODO: Make it work on other types than just AP_FTYPE_RESOURCE? */
+ if (direction == AP_LUA_FILTER_OUTPUT) {
+ spec->direction = AP_LUA_FILTER_OUTPUT;
+ ap_register_output_filter(filter, lua_output_filter_handle, NULL, AP_FTYPE_RESOURCE);
+ }
+ else {
+ spec->direction = AP_LUA_FILTER_INPUT;
+ ap_register_input_filter(filter, lua_input_filter_handle, NULL, AP_FTYPE_RESOURCE);
+ }
+ return NULL;
+}
static int lua_check_user_id_harness_first(request_rec *r)
{
return lua_request_rec_hook_harness(r, "check_user_id", AP_LUA_HOOK_FIRST);
@@ -827,6 +1346,42 @@ static const char *register_quick_hook(cmd_parms *cmd, void *_cfg,
return register_named_file_function_hook("quick", cmd, _cfg, file,
function, APR_HOOK_MIDDLE);
}
+static const char *register_map_handler(cmd_parms *cmd, void *_cfg,
+ const char* match, const char *file, const char *function)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_FILES|
+ NOT_IN_HTACCESS);
+ if (err) {
+ return err;
+ }
+ if (!function) function = "handle";
+ return register_mapped_file_function_hook(match, cmd, _cfg, file,
+ function);
+}
+static const char *register_output_filter(cmd_parms *cmd, void *_cfg,
+ const char* filter, const char *file, const char *function)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_FILES|
+ NOT_IN_HTACCESS);
+ if (err) {
+ return err;
+ }
+ if (!function) function = "handle";
+ return register_filter_function_hook(filter, cmd, _cfg, file,
+ function, AP_LUA_FILTER_OUTPUT);
+}
+static const char *register_input_filter(cmd_parms *cmd, void *_cfg,
+ const char* filter, const char *file, const char *function)
+{
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_DIRECTORY|NOT_IN_FILES|
+ NOT_IN_HTACCESS);
+ if (err) {
+ return err;
+ }
+ if (!function) function = "handle";
+ return register_filter_function_hook(filter, cmd, _cfg, file,
+ function, AP_LUA_FILTER_INPUT);
+}
static const char *register_quick_block(cmd_parms *cmd, void *_cfg,
const char *line)
{
@@ -910,6 +1465,29 @@ static const char *register_lua_inherit(cmd_parms *cmd,
}
return NULL;
}
+static const char *register_lua_codecache(cmd_parms *cmd,
+ void *_cfg,
+ const char *arg)
+{
+ ap_lua_dir_cfg *cfg = (ap_lua_dir_cfg *) _cfg;
+
+ if (strcasecmp("never", arg) == 0) {
+ cfg->codecache = AP_LUA_CACHE_NEVER;
+ }
+ else if (strcasecmp("stat", arg) == 0) {
+ cfg->codecache = AP_LUA_CACHE_STAT;
+ }
+ else if (strcasecmp("forever", arg) == 0) {
+ cfg->codecache = AP_LUA_CACHE_FOREVER;
+ }
+ else {
+ return apr_psprintf(cmd->pool,
+ "LuaCodeCache type of '%s' not recognized, valid "
+ "options are 'never', 'stat', and 'forever'",
+ arg);
+ }
+ return NULL;
+}
static const char *register_lua_scope(cmd_parms *cmd,
void *_cfg,
const char *scope,
@@ -936,12 +1514,33 @@ static const char *register_lua_scope(cmd_parms *cmd,
#endif
cfg->vm_scope = AP_LUA_SCOPE_THREAD;
}
+ else if (strcmp("server", scope) == 0) {
+ unsigned int vmin, vmax;
+#if !APR_HAS_THREADS
+ return apr_psprintf(cmd->pool,
+ "Scope type of '%s' cannot be used because this "
+ "server does not have threading support "
+ "(APR_HAS_THREADS)"
+ scope);
+#endif
+ cfg->vm_scope = AP_LUA_SCOPE_SERVER;
+ vmin = min ? atoi(min) : 1;
+ vmax = max ? atoi(max) : 1;
+ if (vmin == 0) {
+ vmin = 1;
+ }
+ if (vmax < vmin) {
+ vmax = vmin;
+ }
+ cfg->vm_min = vmin;
+ cfg->vm_max = vmax;
+ }
else {
return apr_psprintf(cmd->pool,
"Invalid value for LuaScope, '%s', acceptable "
- "values are: 'once', 'request', 'conn', 'server'"
+ "values are: 'once', 'request', 'conn'"
#if APR_HAS_THREADS
- ", 'thread'"
+ ", 'thread', 'server'"
#endif
,scope);
}
@@ -961,7 +1560,9 @@ static const char *register_lua_root(cmd_parms *cmd, void *_cfg,
cfg->root_path = root;
return NULL;
}
-AP_LUA_DECLARE(const char *) ap_lua_ssl_val(apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, const char *var)
+
+const char *ap_lua_ssl_val(apr_pool_t *p, server_rec *s, conn_rec *c,
+ request_rec *r, const char *var)
{
if (lua_ssl_val) {
return (const char *)lua_ssl_val(p, s, c, r, (char *)var);
@@ -969,7 +1570,7 @@ AP_LUA_DECLARE(const char *) ap_lua_ssl_val(apr_pool_t *p, server_rec *s, conn_r
return NULL;
}
-AP_LUA_DECLARE(int) ap_lua_ssl_is_https(conn_rec *c)
+int ap_lua_ssl_is_https(conn_rec *c)
{
return lua_ssl_is_https ? lua_ssl_is_https(c) : 0;
}
@@ -1018,7 +1619,7 @@ static authz_status lua_authz_check(request_rec *r, const char *require_line,
spec = create_vm_spec(&pool, r, cfg, server_cfg, prov_spec->file_name,
NULL, 0, prov_spec->function_name, "authz provider");
- L = ap_lua_get_lua_state(pool, spec);
+ L = ap_lua_get_lua_state(pool, spec, r);
if (L == NULL) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02314)
"Unable to compile VM for authz provider %s", prov_spec->name);
@@ -1029,6 +1630,7 @@ static authz_status lua_authz_check(request_rec *r, const char *require_line,
ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, APLOGNO(02319)
"Unable to find function %s in %s",
prov_spec->function_name, prov_spec->file_name);
+ ap_lua_release_state(L, spec, r);
return AUTHZ_GENERAL_ERROR;
}
ap_lua_run_lua_request(L, r);
@@ -1037,6 +1639,7 @@ static authz_status lua_authz_check(request_rec *r, const char *require_line,
if (!lua_checkstack(L, prov_spec->args->nelts)) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02315)
"Error: authz provider %s: too many arguments", prov_spec->name);
+ ap_lua_release_state(L, spec, r);
return AUTHZ_GENERAL_ERROR;
}
for (i = 0; i < prov_spec->args->nelts; i++) {
@@ -1049,14 +1652,17 @@ static authz_status lua_authz_check(request_rec *r, const char *require_line,
const char *err = lua_tostring(L, -1);
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02316)
"Error executing authz provider %s: %s", prov_spec->name, err);
+ ap_lua_release_state(L, spec, r);
return AUTHZ_GENERAL_ERROR;
}
if (!lua_isnumber(L, -1)) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02317)
"Error: authz provider %s did not return integer", prov_spec->name);
+ ap_lua_release_state(L, spec, r);
return AUTHZ_GENERAL_ERROR;
}
result = lua_tointeger(L, -1);
+ ap_lua_release_state(L, spec, r);
switch (result) {
case AUTHZ_DENIED:
case AUTHZ_GRANTED:
@@ -1184,6 +1790,10 @@ command_rec lua_commands[] = {
AP_INIT_TAKE1("LuaInherit", register_lua_inherit, NULL, OR_ALL,
"Controls how Lua scripts in parent contexts are merged with the current "
" context: none|parent-last|parent-first (default: parent-first) "),
+
+ AP_INIT_TAKE1("LuaCodeCache", register_lua_codecache, NULL, OR_ALL,
+ "Controls the behavior of the in-memory code cache "
+ " context: stat|forever|never (default: stat) "),
AP_INIT_TAKE2("LuaQuickHandler", register_quick_hook, NULL, OR_ALL,
"Provide a hook for the quick handler of request processing"),
@@ -1193,6 +1803,12 @@ command_rec lua_commands[] = {
AP_INIT_RAW_ARGS("Lua_____ByteCodeHack", hack_section_handler, NULL,
OR_ALL,
"(internal) Byte code handler"),
+ AP_INIT_TAKE23("LuaMapHandler", register_map_handler, NULL, OR_ALL,
+ "Maps a path to a lua handler"),
+ AP_INIT_TAKE3("LuaOutputFilter", register_output_filter, NULL, OR_ALL,
+ "Registers a Lua function as an output filter"),
+ AP_INIT_TAKE3("LuaInputFilter", register_input_filter, NULL, OR_ALL,
+ "Registers a Lua function as an input filter"),
{NULL}
};
@@ -1204,10 +1820,15 @@ static void *create_dir_config(apr_pool_t *p, char *dir)
cfg->package_cpaths = apr_array_make(p, 2, sizeof(char *));
cfg->mapped_handlers =
apr_array_make(p, 1, sizeof(ap_lua_mapped_handler_spec *));
+ cfg->mapped_filters =
+ apr_array_make(p, 1, sizeof(ap_lua_filter_handler_spec *));
cfg->pool = p;
cfg->hooks = apr_hash_make(p);
cfg->dir = apr_pstrdup(p, dir);
cfg->vm_scope = AP_LUA_SCOPE_UNSET;
+ cfg->codecache = AP_LUA_CACHE_UNSET;
+ cfg->vm_min = 0;
+ cfg->vm_max = 0;
return cfg;
}
@@ -1269,24 +1890,31 @@ static void *merge_dir_config(apr_pool_t *p, void *basev, void *overridesv)
a->dir = apr_pstrdup(p, overrides->dir);
a->vm_scope = (overrides->vm_scope == AP_LUA_SCOPE_UNSET) ? base->vm_scope: overrides->vm_scope;
- a->inherit = (overrides->inherit== AP_LUA_INHERIT_UNSET) ? base->inherit : overrides->inherit;
+ a->inherit = (overrides->inherit == AP_LUA_INHERIT_UNSET) ? base->inherit : overrides->inherit;
+ a->codecache = (overrides->codecache == AP_LUA_CACHE_UNSET) ? base->codecache : overrides->codecache;
+
+ a->vm_min = (overrides->vm_min == 0) ? base->vm_min : overrides->vm_min;
+ a->vm_max = (overrides->vm_max == 0) ? base->vm_max : overrides->vm_max;
if (a->inherit == AP_LUA_INHERIT_UNSET || a->inherit == AP_LUA_INHERIT_PARENT_FIRST) {
a->package_paths = apr_array_append(p, base->package_paths, overrides->package_paths);
a->package_cpaths = apr_array_append(p, base->package_cpaths, overrides->package_cpaths);
a->mapped_handlers = apr_array_append(p, base->mapped_handlers, overrides->mapped_handlers);
+ a->mapped_filters = apr_array_append(p, base->mapped_filters, overrides->mapped_filters);
a->hooks = apr_hash_merge(p, overrides->hooks, base->hooks, overlay_hook_specs, NULL);
}
else if (a->inherit == AP_LUA_INHERIT_PARENT_LAST) {
a->package_paths = apr_array_append(p, overrides->package_paths, base->package_paths);
a->package_cpaths = apr_array_append(p, overrides->package_cpaths, base->package_cpaths);
a->mapped_handlers = apr_array_append(p, overrides->mapped_handlers, base->mapped_handlers);
+ a->mapped_filters = apr_array_append(p, overrides->mapped_filters, base->mapped_filters);
a->hooks = apr_hash_merge(p, base->hooks, overrides->hooks, overlay_hook_specs, NULL);
}
else {
a->package_paths = overrides->package_paths;
a->package_cpaths = overrides->package_cpaths;
a->mapped_handlers= overrides->mapped_handlers;
+ a->mapped_filters= overrides->mapped_filters;
a->hooks= overrides->hooks;
}
@@ -1346,9 +1974,15 @@ static void lua_register_hooks(apr_pool_t *p)
APR_OPTIONAL_HOOK(ap_lua, lua_request, lua_request_hook, NULL, NULL,
APR_HOOK_REALLY_FIRST);
-
+ ap_hook_handler(lua_map_handler, NULL, NULL, AP_LUA_HOOK_FIRST);
+#if APR_HAS_THREADS
+ ap_hook_child_init(ap_lua_init_mutex, NULL, NULL, APR_HOOK_MIDDLE);
+#endif
/* providers */
lua_authz_providers = apr_hash_make(p);
+
+ /* ivm mutex */
+ apr_thread_mutex_create(&lua_ivm_mutex, APR_THREAD_MUTEX_DEFAULT, p);
}
AP_DECLARE_MODULE(lua) = {
diff --git a/modules/lua/mod_lua.dsp b/modules/lua/mod_lua.dsp
index 71d33b8b..770c13a1 100644
--- a/modules/lua/mod_lua.dsp
+++ b/modules/lua/mod_lua.dsp
@@ -117,6 +117,14 @@ SOURCE=.\lua_config.h
# End Source File
# Begin Source File
+SOURCE=.\lua_passwd.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\lua_passwd.h
+# End Source File
+# Begin Source File
+
SOURCE=.\lua_request.c
# End Source File
# Begin Source File
diff --git a/modules/lua/mod_lua.h b/modules/lua/mod_lua.h
index 52ff96ad..7fd11533 100644
--- a/modules/lua/mod_lua.h
+++ b/modules/lua/mod_lua.h
@@ -39,6 +39,7 @@
#include "apr_file_info.h"
#include "apr_time.h"
#include "apr_hooks.h"
+#include "apr_reslist.h"
/* Allow for Lua 5.2 backwards compatibility */
#define LUA_COMPAT_ALL
@@ -50,6 +51,7 @@
#if LUA_VERSION_NUM > 501
/* Load mode for lua_load() */
#define lua_load(a,b,c,d) lua_load(a,b,c,d,NULL)
+#define lua_resume(a,b) lua_resume(a, NULL, b)
#endif
/* Create a set of AP_LUA_DECLARE(type), AP_LUA_DECLARE_NONSTD(type) and
@@ -81,7 +83,7 @@ typedef enum {
AP_LUA_INHERIT_UNSET = -1,
AP_LUA_INHERIT_NONE = 0,
AP_LUA_INHERIT_PARENT_FIRST = 1,
- AP_LUA_INHERIT_PARENT_LAST = 2,
+ AP_LUA_INHERIT_PARENT_LAST = 2
} ap_lua_inherit_t;
/**
@@ -101,9 +103,10 @@ typedef struct
apr_array_header_t *package_cpaths;
/**
- * mapped handlers
+ * mapped handlers/filters
*/
apr_array_header_t *mapped_handlers;
+ apr_array_header_t *mapped_filters;
apr_pool_t *pool;
@@ -111,6 +114,8 @@ typedef struct
* AP_LUA_SCOPE_ONCE | AP_LUA_SCOPE_REQUEST | AP_LUA_SCOPE_CONN | AP_LUA_SCOPE_SERVER
*/
unsigned int vm_scope;
+ unsigned int vm_min;
+ unsigned int vm_max;
/* info for the hook harnesses */
apr_hash_t *hooks; /* <wombat_hook_info> */
@@ -120,6 +125,11 @@ typedef struct
/* Whether Lua scripts in a sub-dir are run before parents */
ap_lua_inherit_t inherit;
+
+ /**
+ * AP_LUA_CACHE_NEVER | AP_LUA_CACHE_STAT | AP_LUA_CACHE_FOREVER
+ */
+ unsigned int codecache;
} ap_lua_dir_cfg;
@@ -158,8 +168,9 @@ APR_DECLARE_EXTERNAL_HOOK(ap_lua, AP_LUA, int, lua_open,
APR_DECLARE_EXTERNAL_HOOK(ap_lua, AP_LUA, int, lua_request,
(lua_State *L, request_rec *r))
-AP_LUA_DECLARE(const char *) ap_lua_ssl_val(apr_pool_t *p, server_rec *s, conn_rec *c, request_rec *r, const char *var);
+const char *ap_lua_ssl_val(apr_pool_t *p, server_rec *s, conn_rec *c,
+ request_rec *r, const char *var);
-AP_LUA_DECLARE(int) ap_lua_ssl_is_https(conn_rec *c);
+int ap_lua_ssl_is_https(conn_rec *c);
#endif /* !_MOD_LUA_H_ */
diff --git a/modules/mappers/mod_imagemap.c b/modules/mappers/mod_imagemap.c
index 1857760b..65b9eb15 100644
--- a/modules/mappers/mod_imagemap.c
+++ b/modules/mappers/mod_imagemap.c
@@ -686,7 +686,7 @@ static int imap_handler_internal(request_rec *r)
if (!*string_pos) { /* need at least two fields */
goto need_2_fields;
}
- while(*string_pos && apr_isspace(*string_pos)) { /* past whitespace */
+ while (apr_isspace(*string_pos)) { /* past whitespace */
++string_pos;
}
diff --git a/modules/mappers/mod_negotiation.c b/modules/mappers/mod_negotiation.c
index 4a3a4573..5ec0d4d0 100644
--- a/modules/mappers/mod_negotiation.c
+++ b/modules/mappers/mod_negotiation.c
@@ -366,7 +366,7 @@ static float atoq(const char *string)
return 1.0f;
}
- while (*string && apr_isspace(*string)) {
+ while (apr_isspace(*string)) {
++string;
}
@@ -464,7 +464,7 @@ static const char *get_entry(apr_pool_t *p, accept_rec *result,
}
*cp++ = '\0'; /* Delimit var */
- while (*cp && (apr_isspace(*cp) || *cp == '=')) {
+ while (apr_isspace(*cp) || *cp == '=') {
++cp;
}
@@ -757,7 +757,7 @@ static enum header_state get_header_line(char *buffer, int len, apr_file_t *map)
/* If blank, just return it --- this ends information on this variant */
- for (cp = buffer; (*cp && apr_isspace(*cp)); ++cp) {
+ for (cp = buffer; apr_isspace(*cp); ++cp) {
continue;
}
@@ -924,7 +924,7 @@ static char *lcase_header_name_return_body(char *header, request_rec *r)
do {
++cp;
- } while (*cp && apr_isspace(*cp));
+ } while (apr_isspace(*cp));
if (!*cp) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00682)
diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c
index 4fa90546..a7ac2134 100644
--- a/modules/mappers/mod_rewrite.c
+++ b/modules/mappers/mod_rewrite.c
@@ -1255,7 +1255,7 @@ static char *lookup_map_txtfile(request_rec *r, const char *file, char *key)
}
/* jump to the value */
- while (*p && apr_isspace(*p)) {
+ while (apr_isspace(*p)) {
++p;
}
diff --git a/modules/metadata/mod_cern_meta.c b/modules/metadata/mod_cern_meta.c
index fe704f1a..f06c464c 100644
--- a/modules/metadata/mod_cern_meta.c
+++ b/modules/metadata/mod_cern_meta.c
@@ -237,7 +237,7 @@ static int scan_meta_file(request_rec *r, apr_file_t *f)
}
*l++ = '\0';
- while (*l && apr_isspace(*l))
+ while (apr_isspace(*l))
++l;
if (!strcasecmp(w, "Content-type")) {
diff --git a/modules/metadata/mod_headers.c b/modules/metadata/mod_headers.c
index 93977390..9ce2fdec 100644
--- a/modules/metadata/mod_headers.c
+++ b/modules/metadata/mod_headers.c
@@ -722,7 +722,7 @@ static int do_headers_fixup(request_rec *r, apr_table_t *headers,
while (*val) {
const char *tok_start;
- while (*val && apr_isspace(*val))
+ while (apr_isspace(*val))
++val;
tok_start = val;
diff --git a/modules/metadata/mod_remoteip.c b/modules/metadata/mod_remoteip.c
index a0bfd864..b0af3a32 100644
--- a/modules/metadata/mod_remoteip.c
+++ b/modules/metadata/mod_remoteip.c
@@ -170,10 +170,9 @@ static const char *proxies_set(cmd_parms *cmd, void *cfg,
}
if (rv != APR_SUCCESS) {
- char msgbuf[128];
- apr_strerror(rv, msgbuf, sizeof(msgbuf));
- return apr_pstrcat(cmd->pool, "RemoteIP: Error parsing IP ", arg,
- " (", msgbuf, " error) for ", cmd->cmd->name, NULL);
+ return apr_psprintf(cmd->pool,
+ "RemoteIP: Error parsing IP %s (%pm error) for %s",
+ arg, &rv, cmd->cmd->name);
}
return NULL;
@@ -192,9 +191,8 @@ static const char *proxylist_read(cmd_parms *cmd, void *cfg,
filename = ap_server_root_relative(cmd->temp_pool, filename);
rv = ap_pcfg_openfile(&cfp, cmd->temp_pool, filename);
if (rv != APR_SUCCESS) {
- return apr_psprintf(cmd->pool, "%s: Could not open file %s: %s",
- cmd->cmd->name, filename,
- apr_strerror(rv, lbuf, sizeof(lbuf)));
+ return apr_psprintf(cmd->pool, "%s: Could not open file %s: %pm",
+ cmd->cmd->name, filename, &rv);
}
while (!(ap_cfg_getline(lbuf, MAX_STRING_LEN, cfp))) {
@@ -205,6 +203,7 @@ static const char *proxylist_read(cmd_parms *cmd, void *cfg,
}
errmsg = proxies_set(cmd, cfg, arg);
if (errmsg) {
+ ap_cfg_closefile(cfp);
errmsg = apr_psprintf(cmd->pool, "%s at line %d of %s",
errmsg, cfp->line_number, filename);
return errmsg;
diff --git a/modules/metadata/mod_setenvif.c b/modules/metadata/mod_setenvif.c
index 65214cd4..a62670e0 100644
--- a/modules/metadata/mod_setenvif.c
+++ b/modules/metadata/mod_setenvif.c
@@ -314,7 +314,7 @@ static const char *add_setenvif_core(cmd_parms *cmd, void *mconfig,
*/
for (i = 0; i < sconf->conditionals->nelts; ++i) {
new = &entries[i];
- if (!strcasecmp(new->name, fname)) {
+ if (new->name && !strcasecmp(new->name, fname)) {
fname = new->name;
break;
}
diff --git a/modules/proxy/NWGNUmakefile b/modules/proxy/NWGNUmakefile
index fe491c13..dce99d16 100644
--- a/modules/proxy/NWGNUmakefile
+++ b/modules/proxy/NWGNUmakefile
@@ -165,6 +165,7 @@ TARGET_nlm = \
$(OBJDIR)/proxylbm_hb.nlm \
$(OBJDIR)/proxylbm_req.nlm \
$(OBJDIR)/proxylbm_traf.nlm \
+ $(OBJDIR)/proxywstunnel.nlm \
$(EOLIST)
#
diff --git a/modules/proxy/NWGNUproxy b/modules/proxy/NWGNUproxy
index 3c4f112f..443b4cb2 100644
--- a/modules/proxy/NWGNUproxy
+++ b/modules/proxy/NWGNUproxy
@@ -251,7 +251,7 @@ install :: nlms FORCE
vpath %.c ../arch/netware
-$(OBJDIR)/mod_proxy.imp:
+$(OBJDIR)/mod_proxy.imp: NWGNUproxy
@echo $(DL)GEN $@$(DL)
@echo $(DL)# Exports of mod_proxy$(DL)> $@
@echo $(DL) (AP$(VERSION_MAJMIN))$(DL)>> $@
@@ -283,6 +283,7 @@ $(OBJDIR)/mod_proxy.imp:
@echo $(DL) ap_proxy_connect_to_backend,$(DL)>> $@
@echo $(DL) ap_proxy_connection_create,$(DL)>> $@
@echo $(DL) ap_proxy_cookie_reverse_map,$(DL)>> $@
+ @echo $(DL) ap_proxy_create_hdrbrgd,$(DL)>> $@
@echo $(DL) ap_proxy_define_balancer,$(DL)>> $@
@echo $(DL) ap_proxy_define_worker,$(DL)>> $@
@echo $(DL) ap_proxy_determine_connection,$(DL)>> $@
@@ -300,6 +301,7 @@ $(OBJDIR)/mod_proxy.imp:
@echo $(DL) ap_proxy_is_word,$(DL)>> $@
@echo $(DL) ap_proxy_location_reverse_map,$(DL)>> $@
@echo $(DL) ap_proxy_parse_wstatus,$(DL)>> $@
+ @echo $(DL) ap_proxy_pass_brigade,$(DL)>> $@
@echo $(DL) ap_proxy_post_request,$(DL)>> $@
@echo $(DL) ap_proxy_pre_http_request,$(DL)>> $@
@echo $(DL) ap_proxy_pre_request,$(DL)>> $@
diff --git a/modules/proxy/NWGNUproxywstunnel b/modules/proxy/NWGNUproxywstunnel
new file mode 100644
index 00000000..ce84ce45
--- /dev/null
+++ b/modules/proxy/NWGNUproxywstunnel
@@ -0,0 +1,250 @@
+#
+# Make sure all needed macros are defined
+#
+
+#
+# Get the 'head' of the build environment if necessary. This includes default
+# targets and paths to tools
+#
+
+ifndef EnvironmentDefined
+include $(AP_WORK)/build/NWGNUhead.inc
+endif
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(STDMOD)/http \
+ $(STDMOD)/proxy \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxywstunnel
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) Proxy WebSocket Tunnel Module
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = Prxy WbSkt Module
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 8192
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If these are specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = $(OBJDIR)/$(NLM_NAME).nlm
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib =
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_proxy_wstunnel.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ libc \
+ aprlib \
+ proxy \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ @$(OBJDIR)/mod_proxy.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ proxy_wstunnel_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+
+#
+# Any specialized rules here
+#
+
+vpath %.c balancers
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/proxy/ajp_utils.c b/modules/proxy/ajp_utils.c
index 567af918..2fe9f72d 100644
--- a/modules/proxy/ajp_utils.c
+++ b/modules/proxy/ajp_utils.c
@@ -113,7 +113,7 @@ cleanup:
return #x;\
break;
-/**·
+/**
* Convert numeric message type into string
* @param type AJP message type
* @return AJP message type as a string
diff --git a/modules/proxy/balancers/mod_lbmethod_heartbeat.c b/modules/proxy/balancers/mod_lbmethod_heartbeat.c
index 26c81584..77fb994f 100644
--- a/modules/proxy/balancers/mod_lbmethod_heartbeat.c
+++ b/modules/proxy/balancers/mod_lbmethod_heartbeat.c
@@ -407,7 +407,7 @@ static void *lb_hb_create_config(apr_pool_t *p, server_rec *s)
{
lb_hb_ctx_t *ctx = (lb_hb_ctx_t *) apr_palloc(p, sizeof(lb_hb_ctx_t));
- ctx->path = ap_server_root_relative(p, "logs/hb.dat");
+ ctx->path = ap_runtime_dir_relative(p, DEFAULT_HEARTBEAT_STORAGE);
return ctx;
}
@@ -442,7 +442,7 @@ static const char *cmd_lb_hb_storage(cmd_parms *cmd,
return err;
}
- ctx->path = ap_server_root_relative(p, path);
+ ctx->path = ap_runtime_dir_relative(p, path);
return NULL;
}
diff --git a/modules/proxy/config.m4 b/modules/proxy/config.m4
index e91cbf4c..ce625910 100644
--- a/modules/proxy/config.m4
+++ b/modules/proxy/config.m4
@@ -20,6 +20,7 @@ proxy_fcgi_objs="mod_proxy_fcgi.lo"
proxy_scgi_objs="mod_proxy_scgi.lo"
proxy_fdpass_objs="mod_proxy_fdpass.lo"
proxy_ajp_objs="mod_proxy_ajp.lo ajp_header.lo ajp_link.lo ajp_msg.lo ajp_utils.lo"
+proxy_wstunnel_objs="mod_proxy_wstunnel.lo"
proxy_balancer_objs="mod_proxy_balancer.lo"
case "$host" in
@@ -33,6 +34,7 @@ case "$host" in
proxy_scgi_objs="$proxy_scgi_objs mod_proxy.la"
proxy_fdpass_objs="$proxy_fdpass_objs mod_proxy.la"
proxy_ajp_objs="$proxy_ajp_objs mod_proxy.la"
+ proxy_wstunnel_objs="$proxy_wstunnel_objs mod_proxy.la"
proxy_balancer_objs="$proxy_balancer_objs mod_proxy.la"
;;
esac
@@ -52,6 +54,7 @@ APACHE_MODULE(proxy_fdpass, Apache proxy to Unix Daemon Socket module. Requires
enable_proxy_fdpass=no
fi
],proxy)
+APACHE_MODULE(proxy_wstunnel, Apache proxy WebSocket Tunnel module. Requires and is enabled by --enable-proxy., $proxy_wstunnel_objs, , $proxy_mods_enable,, proxy)
APACHE_MODULE(proxy_ajp, Apache proxy AJP module. Requires and is enabled by --enable-proxy., $proxy_ajp_objs, , $proxy_mods_enable,, proxy)
APACHE_MODULE(proxy_balancer, Apache proxy BALANCER module. Requires and is enabled by --enable-proxy., $proxy_balancer_objs, , $proxy_mods_enable,, proxy)
diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index 0bd3a6c9..0ee2ff35 100644
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -36,6 +36,9 @@ APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup,
#define MAX(x,y) ((x) >= (y) ? (x) : (y))
#endif
+static const char * const proxy_id = "proxy";
+apr_global_mutex_t *proxy_mutex = NULL;
+
/*
* A Web proxy module. Stages:
*
@@ -383,6 +386,14 @@ static const char *set_balancer_param(proxy_server_conf *conf,
}
}
+ else if (!strcasecmp(key, "failontimeout")) {
+ if (!strcasecmp(val, "on"))
+ balancer->failontimeout = 1;
+ else if (!strcasecmp(val, "off"))
+ balancer->failontimeout = 0;
+ else
+ return "failontimeout must be On|Off";
+ }
else if (!strcasecmp(key, "nonce")) {
if (!strcasecmp(val, "None")) {
*balancer->s->nonce = '\0';
@@ -871,7 +882,7 @@ static int proxy_handler(request_rec *r)
int i, rc, access_status;
int direct_connect = 0;
const char *str;
- long maxfwd;
+ apr_int64_t maxfwd;
proxy_balancer *balancer = NULL;
proxy_worker *worker = NULL;
int attempts = 0, max_attempts = 0;
@@ -883,8 +894,14 @@ static int proxy_handler(request_rec *r)
/* handle max-forwards / OPTIONS / TRACE */
if ((str = apr_table_get(r->headers_in, "Max-Forwards"))) {
- maxfwd = strtol(str, NULL, 10);
- if (maxfwd < 1) {
+ char *end;
+ maxfwd = apr_strtoi64(str, &end, 10);
+ if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) {
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
+ apr_psprintf(r->pool,
+ "Max-Forwards value '%s' could not be parsed", str));
+ }
+ else if (maxfwd == 0) {
switch (r->method_number) {
case M_TRACE: {
int access_status;
@@ -905,7 +922,7 @@ static int proxy_handler(request_rec *r)
return OK;
}
default: {
- return ap_proxyerror(r, HTTP_BAD_GATEWAY,
+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
"Max-Forwards has reached zero - proxy loop?");
}
}
@@ -918,7 +935,7 @@ static int proxy_handler(request_rec *r)
}
if (maxfwd >= 0) {
apr_table_setn(r->headers_in, "Max-Forwards",
- apr_psprintf(r->pool, "%ld", maxfwd));
+ apr_psprintf(r->pool, "%" APR_INT64_T_FMT, maxfwd));
}
if (r->method_number == M_TRACE) {
@@ -1160,6 +1177,10 @@ static void * create_proxy_config(apr_pool_t *p, server_rec *s)
ps->req = 0;
ps->max_balancers = 0;
ps->bal_persist = 0;
+ ps->inherit = 1;
+ ps->inherit_set = 0;
+ ps->ppinherit = 1;
+ ps->ppinherit_set = 0;
ps->bgrowth = 5;
ps->bgrowth_set = 0;
ps->req_set = 0;
@@ -1175,7 +1196,7 @@ static void * create_proxy_config(apr_pool_t *p, server_rec *s)
ps->badopt_set = 0;
ps->source_address = NULL;
ps->source_address_set = 0;
- ps->pool = p;
+ apr_pool_create_ex(&ps->pool, p, NULL, NULL);
return ps;
}
@@ -1186,13 +1207,30 @@ static void * merge_proxy_config(apr_pool_t *p, void *basev, void *overridesv)
proxy_server_conf *base = (proxy_server_conf *) basev;
proxy_server_conf *overrides = (proxy_server_conf *) overridesv;
- ps->proxies = apr_array_append(p, base->proxies, overrides->proxies);
+ ps->inherit = (overrides->inherit_set == 0) ? base->inherit : overrides->inherit;
+ ps->inherit_set = overrides->inherit_set || base->inherit_set;
+
+ ps->ppinherit = (overrides->ppinherit_set == 0) ? base->ppinherit : overrides->ppinherit;
+ ps->ppinherit_set = overrides->ppinherit_set || base->ppinherit_set;
+
+ if (ps->ppinherit) {
+ ps->proxies = apr_array_append(p, base->proxies, overrides->proxies);
+ }
+ else {
+ ps->proxies = overrides->proxies;
+ }
ps->sec_proxy = apr_array_append(p, base->sec_proxy, overrides->sec_proxy);
ps->aliases = apr_array_append(p, base->aliases, overrides->aliases);
ps->noproxies = apr_array_append(p, base->noproxies, overrides->noproxies);
ps->dirconn = apr_array_append(p, base->dirconn, overrides->dirconn);
- ps->workers = apr_array_append(p, base->workers, overrides->workers);
- ps->balancers = apr_array_append(p, base->balancers, overrides->balancers);
+ if (ps->inherit || ps->ppinherit) {
+ ps->workers = apr_array_append(p, base->workers, overrides->workers);
+ ps->balancers = apr_array_append(p, base->balancers, overrides->balancers);
+ }
+ else {
+ ps->workers = overrides->workers;
+ ps->balancers = overrides->balancers;
+ }
ps->forward = overrides->forward ? overrides->forward : base->forward;
ps->reverse = overrides->reverse ? overrides->reverse : base->reverse;
@@ -1220,7 +1258,7 @@ static void * merge_proxy_config(apr_pool_t *p, void *basev, void *overridesv)
ps->proxy_status_set = overrides->proxy_status_set || base->proxy_status_set;
ps->source_address = (overrides->source_address_set == 0) ? base->source_address : overrides->source_address;
ps->source_address_set = overrides->source_address_set || base->source_address_set;
- ps->pool = p;
+ ps->pool = base->pool;
return ps;
}
static const char *set_source_address(cmd_parms *parms, void *dummy,
@@ -1890,6 +1928,26 @@ static const char *set_persist(cmd_parms *parms, void *dummy, int flag)
return NULL;
}
+static const char *set_inherit(cmd_parms *parms, void *dummy, int flag)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+
+ psf->inherit = flag;
+ psf->inherit_set = 1;
+ return NULL;
+}
+
+static const char *set_ppinherit(cmd_parms *parms, void *dummy, int flag)
+{
+ proxy_server_conf *psf =
+ ap_get_module_config(parms->server->module_config, &proxy_module);
+
+ psf->ppinherit = flag;
+ psf->ppinherit_set = 1;
+ return NULL;
+}
+
static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg)
{
server_rec *s = cmd->server;
@@ -2279,6 +2337,12 @@ static const command_rec proxy_cmds[] =
"Number of additional Balancers that can be added post-config"),
AP_INIT_FLAG("BalancerPersist", set_persist, NULL, RSRC_CONF,
"on if the balancer should persist changes on reboot/restart made via the Balancer Manager"),
+ AP_INIT_FLAG("BalancerInherit", set_inherit, NULL, RSRC_CONF,
+ "on if this server should inherit Balancers and Workers defined in the main server "
+ "(Setting to off recommended if using the Balancer Manager)"),
+ AP_INIT_FLAG("ProxyPassInherit", set_ppinherit, NULL, RSRC_CONF,
+ "on if this server should inherit all ProxyPass directives defined in the main server "
+ "(Setting to off recommended if using the Balancer Manager)"),
AP_INIT_TAKE1("ProxyStatus", set_status_opt, NULL, RSRC_CONF,
"Configure Status: proxy status to one of: on | off | full"),
AP_INIT_RAW_ARGS("ProxySet", set_proxy_param, NULL, RSRC_CONF|ACCESS_CONF,
@@ -2341,6 +2405,13 @@ PROXY_DECLARE(const char *) ap_proxy_ssl_val(apr_pool_t *p, server_rec *s,
static int proxy_post_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp, server_rec *s)
{
+ apr_status_t rv = ap_global_mutex_create(&proxy_mutex, NULL,
+ proxy_id, NULL, s, pconf, 0);
+ if (rv != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02478)
+ "failed to create %s mutex", proxy_id);
+ return rv;
+ }
proxy_ssl_enable = APR_RETRIEVE_OPTIONAL_FN(ssl_proxy_enable);
proxy_ssl_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable);
@@ -2443,6 +2514,15 @@ static void child_init(apr_pool_t *p, server_rec *s)
{
proxy_worker *reverse = NULL;
+ apr_status_t rv = apr_global_mutex_child_init(&proxy_mutex,
+ apr_global_mutex_lockfile(proxy_mutex),
+ p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(02479)
+ "could not init proxy_mutex in child");
+ exit(1); /* Ugly, but what else? */
+ }
+
/* TODO */
while (s) {
void *sconf = s->module_config;
@@ -2500,11 +2580,19 @@ static void child_init(apr_pool_t *p, server_rec *s)
/*
* This routine is called before the server processes the configuration
- * files. There is no return value.
+ * files.
*/
static int proxy_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp)
{
+ apr_status_t rv = ap_mutex_register(pconf, proxy_id, NULL,
+ APR_LOCK_DEFAULT, 0);
+ if (rv != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02480)
+ "failed to register %s mutex", proxy_id);
+ return 500; /* An HTTP status would be a misnomer! */
+ }
+
APR_OPTIONAL_HOOK(ap, status_hook, proxy_status_hook, NULL, NULL,
APR_HOOK_MIDDLE);
/* Reset workers count on graceful restart */
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 5074aa16..81fd14c1 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -164,7 +164,7 @@ typedef struct {
status_full
} proxy_status; /* Status display options */
apr_sockaddr_t *source_address;
- apr_global_mutex_t *mutex; /* global lock (needed??) */
+ apr_global_mutex_t *mutex; /* global lock, for pool, etc */
ap_slotmem_instance_t *bslot; /* balancers shm data - runtime */
ap_slotmem_provider_t *storage;
@@ -179,6 +179,10 @@ typedef struct {
unsigned int source_address_set:1;
unsigned int bgrowth_set:1;
unsigned int bal_persist:1;
+ unsigned int inherit:1;
+ unsigned int inherit_set:1;
+ unsigned int ppinherit:1;
+ unsigned int ppinherit_set:1;
} proxy_server_conf;
@@ -446,6 +450,7 @@ struct proxy_balancer {
proxy_server_conf *sconf;
void *context; /* general purpose storage */
proxy_balancer_shared *s; /* Shared data */
+ int failontimeout; /* Whether to mark a member in Err if IO timeout occurs */
};
struct proxy_balancer_method {
@@ -915,6 +920,56 @@ PROXY_DECLARE(int) ap_proxy_trans_match(request_rec *r,
struct proxy_alias *ent,
proxy_dir_conf *dconf);
+/**
+ * Create an HTTP request header brigade, populating old_cl_val and old_te_val as required.
+ * @param p pool
+ * @param header_brigade header brigade to use/fill
+ * @param r request
+ * @param p_conn proxy connection rec
+ * @param worker selected worker
+ * @param conf per-server proxy config
+ * @param uri uri
+ * @param url url
+ * @param server_portstr port as string
+ * @param old_cl_val stored old content-len val
+ * @param old_te_val stored old TE val
+ * @return OK or HTTP_EXPECTATION_FAILED
+ */
+PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
+ apr_bucket_brigade *header_brigade,
+ request_rec *r,
+ proxy_conn_rec *p_conn,
+ proxy_worker *worker,
+ proxy_server_conf *conf,
+ apr_uri_t *uri,
+ char *url, char *server_portstr,
+ char **old_cl_val,
+ char **old_te_val);
+
+/**
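+ * Send a brigade of request data to the origin server's output filters (the
+ * former static pass_brigade() from mod_proxy_http), appending a flush bucket
+ * when the flush argument is non-zero.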
+ * @param bucket_alloc bucket allocator
+ * @param r request
+ * @param p_conn proxy connection
+ * @param origin connection rec of origin
+ * @param bb brigade to send to origin
+ * @param flush flush
+ * @return status (OK)
+ */
+PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+ request_rec *r, proxy_conn_rec *p_conn,
+ conn_rec *origin, apr_bucket_brigade *bb,
+ int flush);
+
+/**
+ * Clear the headers referenced by the Connection header from the given
+ * table, and remove the Connection header.
+ * @param r request
+ * @param headers table of headers to clear
+ * @return 1 if "close" was present, 0 otherwise.
+ */
+APR_DECLARE_OPTIONAL_FN(int, ap_proxy_clear_connection,
+ (request_rec *r, apr_table_t *headers));
+
#define PROXY_LBMETHOD "proxylbmethod"
/* The number of dynamic workers that can be added when reconfiguring.
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index cd9987e1..0f45be7a 100644
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -649,6 +649,17 @@ static int proxy_balancer_post_request(proxy_worker *worker,
}
}
+ if (balancer->failontimeout
+ && (apr_table_get(r->notes, "proxy_timedout")) != NULL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02460)
+ "%s: Forcing worker (%s) into error state "
+ "due to timeout and 'failonstatus' parameter being set",
+ balancer->s->name, worker->s->name);
+ worker->s->status |= PROXY_WORKER_IN_ERROR;
+ worker->s->error_time = apr_time_now();
+
+ }
+
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
"%s: Unlock failed for post_request", balancer->s->name);
@@ -769,9 +780,9 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
continue;
}
if (conf->bal_persist) {
- type = AP_SLOTMEM_TYPE_PREGRAB | AP_SLOTMEM_TYPE_PERSIST;
+ type = AP_SLOTMEM_TYPE_PERSIST;
} else {
- type = AP_SLOTMEM_TYPE_PREGRAB;
+ type = 0;
}
if (conf->balancers->nelts) {
conf->max_balancers = conf->balancers->nelts + conf->bgrowth;
@@ -1160,7 +1171,7 @@ static int balancer_handler(request_rec *r)
(val = apr_table_get(params, "b_nwrkr"))) {
char *ret;
proxy_worker *nworker;
- nworker = ap_proxy_get_worker(conf->pool, bsel, conf, val);
+ nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
if (!nworker && storage->num_free_slots(bsel->wslot)) {
if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
@@ -1675,8 +1686,9 @@ static void ap_proxy_balancer_register_hook(apr_pool_t *p)
* initializes
*/
static const char *const aszPred[] = { "mpm_winnt.c", "mod_slotmem_shm.c", NULL};
+ static const char *const aszPred2[] = { "mod_proxy.c", NULL};
/* manager handler */
- ap_hook_post_config(balancer_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(balancer_post_config, aszPred2, NULL, APR_HOOK_MIDDLE);
ap_hook_pre_config(balancer_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_handler(balancer_handler, NULL, NULL, APR_HOOK_FIRST);
ap_hook_child_init(balancer_child_init, aszPred, NULL, APR_HOOK_MIDDLE);
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index 07b5408d..cffad2e7 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -21,6 +21,9 @@
module AP_MODULE_DECLARE_DATA proxy_http_module;
+static int (*ap_proxy_clear_connection_fn)(request_rec *r, apr_table_t *headers) =
+ NULL;
+
static apr_status_t ap_proxy_http_cleanup(const char *scheme,
request_rec *r,
proxy_conn_rec *backend);
@@ -178,33 +181,7 @@ static apr_table_t *ap_proxy_clean_warnings(apr_pool_t *p, apr_table_t *headers)
return headers;
}
}
-static int clear_conn_headers(void *data, const char *key, const char *val)
-{
- apr_table_t *headers = ((header_dptr*)data)->table;
- apr_pool_t *pool = ((header_dptr*)data)->pool;
- const char *name;
- char *next = apr_pstrdup(pool, val);
- while (*next) {
- name = next;
- while (*next && !apr_isspace(*next) && (*next != ',')) {
- ++next;
- }
- while (*next && (apr_isspace(*next) || (*next == ','))) {
- *next++ = '\0';
- }
- apr_table_unset(headers, name);
- }
- return 1;
-}
-static void ap_proxy_clear_connection(apr_pool_t *p, apr_table_t *headers)
-{
- header_dptr x;
- x.pool = p;
- x.table = headers;
- apr_table_unset(headers, "Proxy-Connection");
- apr_table_do(clear_conn_headers, &x, headers, "Connection", NULL);
- apr_table_unset(headers, "Connection");
-}
+
static void add_te_chunked(apr_pool_t *p,
apr_bucket_alloc_t *bucket_alloc,
apr_bucket_brigade *header_brigade)
@@ -250,44 +227,6 @@ static void terminate_headers(apr_bucket_alloc_t *bucket_alloc,
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
}
-static int pass_brigade(apr_bucket_alloc_t *bucket_alloc,
- request_rec *r, proxy_conn_rec *p_conn,
- conn_rec *origin, apr_bucket_brigade *bb,
- int flush)
-{
- apr_status_t status;
- apr_off_t transferred;
-
- if (flush) {
- apr_bucket *e = apr_bucket_flush_create(bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- }
- apr_brigade_length(bb, 0, &transferred);
- if (transferred != -1)
- p_conn->worker->s->transferred += transferred;
- status = ap_pass_brigade(origin->output_filters, bb);
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01084)
- "pass request body failed to %pI (%s)",
- p_conn->addr, p_conn->hostname);
- if (origin->aborted) {
- const char *ssl_note;
-
- if (((ssl_note = apr_table_get(origin->notes, "SSL_connect_rv"))
- != NULL) && (strcmp(ssl_note, "err") == 0)) {
- return ap_proxyerror(r, HTTP_INTERNAL_SERVER_ERROR,
- "Error during SSL Handshake with"
- " remote server");
- }
- return APR_STATUS_IS_TIMEUP(status) ? HTTP_GATEWAY_TIME_OUT : HTTP_BAD_GATEWAY;
- }
- else {
- return HTTP_BAD_REQUEST;
- }
- }
- apr_brigade_cleanup(bb);
- return OK;
-}
#define MAX_MEM_SPOOL 16384
@@ -366,7 +305,7 @@ static int stream_reqbody_chunked(apr_pool_t *p,
}
/* The request is flushed below this loop with chunk EOS header */
- rv = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0);
+ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0);
if (rv != OK) {
return rv;
}
@@ -412,7 +351,7 @@ static int stream_reqbody_chunked(apr_pool_t *p,
}
/* Now we have headers-only, or the chunk EOS mark; flush it */
- rv = pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1);
+ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1);
return rv;
}
@@ -422,7 +361,7 @@ static int stream_reqbody_cl(apr_pool_t *p,
conn_rec *origin,
apr_bucket_brigade *header_brigade,
apr_bucket_brigade *input_brigade,
- const char *old_cl_val)
+ char *old_cl_val)
{
int seen_eos = 0, rv = 0;
apr_status_t status = APR_SUCCESS;
@@ -511,7 +450,7 @@ static int stream_reqbody_cl(apr_pool_t *p,
}
/* Once we hit EOS, we are ready to flush. */
- rv = pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos);
+ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos);
if (rv != OK) {
return rv ;
}
@@ -541,7 +480,7 @@ static int stream_reqbody_cl(apr_pool_t *p,
* body; send it now with the flush flag
*/
bb = header_brigade;
- return(pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1));
+ return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1));
}
return OK;
@@ -685,7 +624,7 @@ static int spool_reqbody_cl(apr_pool_t *p,
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
}
/* This is all a single brigade, pass with flush flagged */
- return(pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1));
+ return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1));
}
/*
@@ -752,257 +691,31 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r,
apr_bucket_brigade *temp_brigade;
apr_bucket *e;
char *buf;
- const apr_array_header_t *headers_in_array;
- const apr_table_entry_t *headers_in;
- int counter;
apr_status_t status;
enum rb_methods {RB_INIT, RB_STREAM_CL, RB_STREAM_CHUNKED, RB_SPOOL_CL};
enum rb_methods rb_method = RB_INIT;
- const char *old_cl_val = NULL;
- const char *old_te_val = NULL;
+ char *old_cl_val = NULL;
+ char *old_te_val = NULL;
apr_off_t bytes_read = 0;
apr_off_t bytes;
int force10, rv;
- apr_table_t *headers_in_copy;
- proxy_dir_conf *dconf;
conn_rec *origin = p_conn->connection;
- int do_100_continue;
-
- dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
- header_brigade = apr_brigade_create(p, origin->bucket_alloc);
-
- /*
- * Send the HTTP/1.1 request to the remote server
- */
-
- /*
- * To be compliant, we only use 100-Continue for requests with bodies.
- * We also make sure we won't be talking HTTP/1.0 as well.
- */
- do_100_continue = (worker->s->ping_timeout_set
- && ap_request_has_body(r)
- && (PROXYREQ_REVERSE == r->proxyreq)
- && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")));
if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
- /*
- * According to RFC 2616 8.2.3 we are not allowed to forward an
- * Expect: 100-continue to an HTTP/1.0 server. Instead we MUST return
- * a HTTP_EXPECTATION_FAILED
- */
if (r->expecting_100) {
return HTTP_EXPECTATION_FAILED;
}
- buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.0" CRLF, NULL);
force10 = 1;
- p_conn->close = 1;
} else {
- buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL);
force10 = 0;
}
- if (apr_table_get(r->subprocess_env, "proxy-nokeepalive")) {
- origin->keepalive = AP_CONN_CLOSE;
- p_conn->close = 1;
- }
- ap_xlate_proto_to_ascii(buf, strlen(buf));
- e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
- if (dconf->preserve_host == 0) {
- if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
- if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
- buf = apr_pstrcat(p, "Host: [", uri->hostname, "]:",
- uri->port_str, CRLF, NULL);
- } else {
- buf = apr_pstrcat(p, "Host: [", uri->hostname, "]", CRLF, NULL);
- }
- } else {
- if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
- buf = apr_pstrcat(p, "Host: ", uri->hostname, ":",
- uri->port_str, CRLF, NULL);
- } else {
- buf = apr_pstrcat(p, "Host: ", uri->hostname, CRLF, NULL);
- }
- }
- }
- else {
- /* don't want to use r->hostname, as the incoming header might have a
- * port attached
- */
- const char* hostname = apr_table_get(r->headers_in,"Host");
- if (!hostname) {
- hostname = r->server->server_hostname;
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092)
- "no HTTP 0.9 request (with no host line) "
- "on incoming request and preserve host set "
- "forcing hostname to be %s for uri %s",
- hostname, r->uri);
- }
- buf = apr_pstrcat(p, "Host: ", hostname, CRLF, NULL);
- }
- ap_xlate_proto_to_ascii(buf, strlen(buf));
- e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
-
- /* handle Via */
- if (conf->viaopt == via_block) {
- /* Block all outgoing Via: headers */
- apr_table_unset(r->headers_in, "Via");
- } else if (conf->viaopt != via_off) {
- const char *server_name = ap_get_server_name(r);
- /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
- * then the server name returned by ap_get_server_name() is the
- * origin server name (which does make too much sense with Via: headers)
- * so we use the proxy vhost's name instead.
- */
- if (server_name == r->hostname)
- server_name = r->server->server_hostname;
- /* Create a "Via:" request header entry and merge it */
- /* Generate outgoing Via: header with/without server comment: */
- apr_table_mergen(r->headers_in, "Via",
- (conf->viaopt == via_full)
- ? apr_psprintf(p, "%d.%d %s%s (%s)",
- HTTP_VERSION_MAJOR(r->proto_num),
- HTTP_VERSION_MINOR(r->proto_num),
- server_name, server_portstr,
- AP_SERVER_BASEVERSION)
- : apr_psprintf(p, "%d.%d %s%s",
- HTTP_VERSION_MAJOR(r->proto_num),
- HTTP_VERSION_MINOR(r->proto_num),
- server_name, server_portstr)
- );
- }
-
- /* Use HTTP/1.1 100-Continue as quick "HTTP ping" test
- * to backend
- */
- if (do_100_continue) {
- apr_table_mergen(r->headers_in, "Expect", "100-Continue");
- r->expecting_100 = 1;
- }
-
- /* X-Forwarded-*: handling
- *
- * XXX Privacy Note:
- * -----------------
- *
- * These request headers are only really useful when the mod_proxy
- * is used in a reverse proxy configuration, so that useful info
- * about the client can be passed through the reverse proxy and on
- * to the backend server, which may require the information to
- * function properly.
- *
- * In a forward proxy situation, these options are a potential
- * privacy violation, as information about clients behind the proxy
- * are revealed to arbitrary servers out there on the internet.
- *
- * The HTTP/1.1 Via: header is designed for passing client
- * information through proxies to a server, and should be used in
- * a forward proxy configuation instead of X-Forwarded-*. See the
- * ProxyVia option for details.
- */
- if (dconf->add_forwarded_headers) {
- if (PROXYREQ_REVERSE == r->proxyreq) {
- const char *buf;
-
- /* Add X-Forwarded-For: so that the upstream has a chance to
- * determine, where the original request came from.
- */
- apr_table_mergen(r->headers_in, "X-Forwarded-For",
- r->useragent_ip);
-
- /* Add X-Forwarded-Host: so that upstream knows what the
- * original request hostname was.
- */
- if ((buf = apr_table_get(r->headers_in, "Host"))) {
- apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
- }
-
- /* Add X-Forwarded-Server: so that upstream knows what the
- * name of this proxy server is (if there are more than one)
- * XXX: This duplicates Via: - do we strictly need it?
- */
- apr_table_mergen(r->headers_in, "X-Forwarded-Server",
- r->server->server_hostname);
- }
- }
- proxy_run_fixups(r);
- /*
- * Make a copy of the headers_in table before clearing the connection
- * headers as we need the connection headers later in the http output
- * filter to prepare the correct response headers.
- *
- * Note: We need to take r->pool for apr_table_copy as the key / value
- * pairs in r->headers_in have been created out of r->pool and
- * p might be (and actually is) a longer living pool.
- * This would trigger the bad pool ancestry abort in apr_table_copy if
- * apr is compiled with APR_POOL_DEBUG.
- */
- headers_in_copy = apr_table_copy(r->pool, r->headers_in);
- ap_proxy_clear_connection(p, headers_in_copy);
- /* send request headers */
- headers_in_array = apr_table_elts(headers_in_copy);
- headers_in = (const apr_table_entry_t *) headers_in_array->elts;
- for (counter = 0; counter < headers_in_array->nelts; counter++) {
- if (headers_in[counter].key == NULL
- || headers_in[counter].val == NULL
-
- /* Already sent */
- || !strcasecmp(headers_in[counter].key, "Host")
-
- /* Clear out hop-by-hop request headers not to send
- * RFC2616 13.5.1 says we should strip these headers
- */
- || !strcasecmp(headers_in[counter].key, "Keep-Alive")
- || !strcasecmp(headers_in[counter].key, "TE")
- || !strcasecmp(headers_in[counter].key, "Trailer")
- || !strcasecmp(headers_in[counter].key, "Upgrade")
-
- ) {
- continue;
- }
- /* Do we want to strip Proxy-Authorization ?
- * If we haven't used it, then NO
- * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
- * So let's make it configurable by env.
- */
- if (!strcasecmp(headers_in[counter].key,"Proxy-Authorization")) {
- if (r->user != NULL) { /* we've authenticated */
- if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
- continue;
- }
- }
- }
-
-
- /* Skip Transfer-Encoding and Content-Length for now.
- */
- if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) {
- old_te_val = headers_in[counter].val;
- continue;
- }
- if (!strcasecmp(headers_in[counter].key, "Content-Length")) {
- old_cl_val = headers_in[counter].val;
- continue;
- }
-
- /* for sub-requests, ignore freshness/expiry headers */
- if (r->main) {
- if ( !strcasecmp(headers_in[counter].key, "If-Match")
- || !strcasecmp(headers_in[counter].key, "If-Modified-Since")
- || !strcasecmp(headers_in[counter].key, "If-Range")
- || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since")
- || !strcasecmp(headers_in[counter].key, "If-None-Match")) {
- continue;
- }
- }
-
- buf = apr_pstrcat(p, headers_in[counter].key, ": ",
- headers_in[counter].val, CRLF,
- NULL);
- ap_xlate_proto_to_ascii(buf, strlen(buf));
- e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ header_brigade = apr_brigade_create(p, origin->bucket_alloc);
+ rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn,
+ worker, conf, uri, url, server_portstr,
+ &old_cl_val, &old_te_val);
+ if (rv != OK) {
+ return rv;
}
/* We have headers, let's figure out our request body... */
@@ -1576,6 +1289,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
"error reading status line from remote "
"server %s:%d", backend->hostname, backend->port);
if (APR_STATUS_IS_TIMEUP(rc)) {
+ apr_table_set(r->notes, "proxy_timedout", "1");
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01103) "read timeout");
if (do_100_continue) {
return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, "Timeout on 100-Continue");
@@ -1754,11 +1468,10 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
* ap_http_filter to know where to end.
*/
te = apr_table_get(r->headers_out, "Transfer-Encoding");
+
/* strip connection listed hop-by-hop headers from response */
- if (ap_find_token(p, apr_table_get(r->headers_out, "Connection"),
- "close"))
- backend->close = 1;
- ap_proxy_clear_connection(p, r->headers_out);
+ backend->close = ap_proxy_clear_connection_fn(r, r->headers_out);
+
if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
ap_set_content_type(r, apr_pstrdup(p, buf));
}
@@ -1770,6 +1483,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
for (i=0; hop_by_hop_hdrs[i]; ++i) {
apr_table_unset(r->headers_out, hop_by_hop_hdrs[i]);
}
+
/* Delete warnings with wrong date */
r->headers_out = ap_proxy_clean_warnings(p, r->headers_out);
@@ -1837,12 +1551,16 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
* behaviour here might break something.
*
* So let's make it configurable.
+ *
+ * We need to set "r->expecting_100 = 1" otherwise origin
+ * server behaviour will apply.
*/
const char *policy = apr_table_get(r->subprocess_env,
"proxy-interim-response");
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
"HTTP: received interim %d response", r->status);
- if (!policy || !strcasecmp(policy, "RFC")) {
+ if (!policy
+ || (!strcasecmp(policy, "RFC") && ((r->expecting_100 = 1)))) {
ap_send_interim_response(r, 1);
}
/* FIXME: refine this to be able to specify per-response-status
@@ -2294,8 +2012,34 @@ cleanup:
}
return status;
}
+
+/* post_config hook: */
+static int proxy_http_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+
+ /* proxy_http_post_config() will be called twice during startup. So, don't
+ * set up the static data the 1st time through. */
+ if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) {
+ return OK;
+ }
+
+ if (!ap_proxy_clear_connection_fn) {
+ ap_proxy_clear_connection_fn =
+ APR_RETRIEVE_OPTIONAL_FN(ap_proxy_clear_connection);
+ if (!ap_proxy_clear_connection_fn) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02477)
+ "mod_proxy must be loaded for mod_proxy_http");
+ return !OK;
+ }
+ }
+
+ return OK;
+}
+
static void ap_proxy_http_register_hook(apr_pool_t *p)
{
+ ap_hook_post_config(proxy_http_post_config, NULL, NULL, APR_HOOK_MIDDLE);
proxy_hook_scheme_handler(proxy_http_handler, NULL, NULL, APR_HOOK_FIRST);
proxy_hook_canon_handler(proxy_http_canon, NULL, NULL, APR_HOOK_FIRST);
warn_rx = ap_pregcomp(p, "[0-9]{3}[ \t]+[^ \t]+[ \t]+\"[^\"]*\"([ \t]+\"([^\"]+)\")?", 0);
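
The post_config hook above relies on APR's optional-function mechanism: mod_proxy registers ap_proxy_clear_connection (see proxy_util.c further below) and mod_proxy_http resolves it once all modules are loaded. The following is a condensed sketch of that provider/consumer pattern, with hypothetical module and variable names, assuming the usual httpd/APR headers; in reality the two halves live in separate modules.

    #include "httpd.h"
    #include "http_config.h"
    #include "http_log.h"
    #include "apr_optional.h"

    /* Shared declaration (normally in a header such as mod_proxy.h). */
    APR_DECLARE_OPTIONAL_FN(int, ap_proxy_clear_connection,
                            (request_rec *r, apr_table_t *headers));

    /* Provider side: a stub implementation, registered from register_hooks. */
    static int ap_proxy_clear_connection(request_rec *r, apr_table_t *headers)
    {
        apr_table_unset(headers, "Proxy-Connection");   /* stub for the sketch */
        return 0;
    }

    static void provider_register_hooks(apr_pool_t *p)
    {
        APR_REGISTER_OPTIONAL_FN(ap_proxy_clear_connection);
    }

    /* Consumer side: resolve the pointer once at post_config time. */
    static APR_OPTIONAL_FN_TYPE(ap_proxy_clear_connection) *clear_conn_fn;

    static int consumer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                                    apr_pool_t *ptemp, server_rec *s)
    {
        clear_conn_fn = APR_RETRIEVE_OPTIONAL_FN(ap_proxy_clear_connection);
        if (!clear_conn_fn) {
            ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s,
                         "provider module is not loaded");
            return !OK;
        }
        return OK;
    }
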
diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c
new file mode 100644
index 00000000..365a2054
--- /dev/null
+++ b/modules/proxy/mod_proxy_wstunnel.c
@@ -0,0 +1,399 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mod_proxy.h"
+
+module AP_MODULE_DECLARE_DATA proxy_wstunnel_module;
+
+/*
+ * Canonicalise ws: and wss: URLs.
+ * url is the full URL starting with the scheme; the default port is
+ * borrowed from the corresponding http/https scheme.
+ */
+static int proxy_wstunnel_canon(request_rec *r, char *url)
+{
+ char *host, *path, sport[7];
+ char *search = NULL;
+ const char *err;
+ char *scheme;
+ apr_port_t port, def_port;
+
+ /* ap_port_of_scheme() */
+ if (strncasecmp(url, "ws:", 3) == 0) {
+ url += 3;
+ scheme = "ws:";
+ def_port = apr_uri_port_of_scheme("http");
+ }
+ else if (strncasecmp(url, "wss:", 4) == 0) {
+ url += 4;
+ scheme = "wss:";
+ def_port = apr_uri_port_of_scheme("https");
+ }
+ else {
+ return DECLINED;
+ }
+
+ port = def_port;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "canonicalising URL %s", url);
+
+ /*
+ * do syntactic check.
+ * We break the URL into host, port, path, search
+ */
+ err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02439) "error parsing URL %s: %s",
+ url, err);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /*
+ * now parse path/search args, according to rfc1738:
+ * process the path. With proxy-nocanon set (by
+ * mod_proxy) we use the raw, unparsed uri
+ */
+ if (apr_table_get(r->notes, "proxy-nocanon")) {
+ path = url; /* this is the raw path */
+ }
+ else {
+ path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
+ r->proxyreq);
+ search = r->args;
+ }
+ if (path == NULL)
+ return HTTP_BAD_REQUEST;
+
+ apr_snprintf(sport, sizeof(sport), ":%d", port);
+
+ if (ap_strchr_c(host, ':')) {
+ /* if literal IPv6 address */
+ host = apr_pstrcat(r->pool, "[", host, "]", NULL);
+ }
+ r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "//", host, sport,
+ "/", path, (search) ? "?" : "",
+ (search) ? search : "", NULL);
+ return OK;
+}
+
+
+static int proxy_wstunnel_transfer(request_rec *r, conn_rec *c_i, conn_rec *c_o,
+ apr_bucket_brigade *bb, char *name)
+{
+ int rv;
+#ifdef DEBUGGING
+ apr_off_t len;
+#endif
+
+ do {
+ apr_brigade_cleanup(bb);
+ rv = ap_get_brigade(c_i->input_filters, bb, AP_MODE_READBYTES,
+ APR_NONBLOCK_READ, AP_IOBUFSIZE);
+ if (rv == APR_SUCCESS) {
+ if (c_o->aborted)
+ return APR_EPIPE;
+ if (APR_BRIGADE_EMPTY(bb))
+ break;
+#ifdef DEBUGGING
+ len = -1;
+ apr_brigade_length(bb, 0, &len);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02440)
+ "read %" APR_OFF_T_FMT
+ " bytes from %s", len, name);
+#endif
+ rv = ap_pass_brigade(c_o->output_filters, bb);
+ if (rv == APR_SUCCESS) {
+ ap_fflush(c_o->output_filters, bb);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02441)
+ "error on %s - ap_pass_brigade",
+ name);
+ }
+ } else if (!APR_STATUS_IS_EAGAIN(rv) && !APR_STATUS_IS_EOF(rv)) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(02442)
+ "error on %s - ap_get_brigade",
+ name);
+ }
+ } while (rv == APR_SUCCESS);
+
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ rv = APR_SUCCESS;
+ }
+ return rv;
+}
+
+/* Search thru the input filters and remove the reqtimeout one */
+static void remove_reqtimeout(ap_filter_t *next)
+{
+ ap_filter_t *reqto = NULL;
+ ap_filter_rec_t *filter;
+
+ filter = ap_get_input_filter_handle("reqtimeout");
+ if (!filter) {
+ return;
+ }
+
+ while (next) {
+ if (next->frec == filter) {
+ reqto = next;
+ break;
+ }
+ next = next->next;
+ }
+ if (reqto) {
+ ap_remove_input_filter(reqto);
+ }
+}
+
+/*
+ * process the request and write the response.
+ */
+static int ap_proxy_wstunnel_request(apr_pool_t *p, request_rec *r,
+ proxy_conn_rec *conn,
+ proxy_worker *worker,
+ proxy_server_conf *conf,
+ apr_uri_t *uri,
+ char *url, char *server_portstr)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_pollset_t *pollset;
+ apr_pollfd_t pollfd;
+ const apr_pollfd_t *signalled;
+ apr_int32_t pollcnt, pi;
+ apr_int16_t pollevent;
+ conn_rec *c = r->connection;
+ apr_socket_t *sock = conn->sock;
+ conn_rec *backconn = conn->connection;
+ int client_error = 0;
+ char *buf;
+ apr_bucket_brigade *header_brigade;
+ apr_bucket *e;
+ char *old_cl_val = NULL;
+ char *old_te_val = NULL;
+ apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
+ apr_socket_t *client_socket = ap_get_conn_socket(c);
+
+ header_brigade = apr_brigade_create(p, backconn->bucket_alloc);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "sending request");
+
+ rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, conn,
+ worker, conf, uri, url, server_portstr,
+ &old_cl_val, &old_te_val);
+ if (rv != OK) {
+ return rv;
+ }
+
+ buf = apr_pstrcat(p, "Upgrade: WebSocket", CRLF, "Connection: Upgrade", CRLF, CRLF, NULL);
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+
+ if ((rv = ap_proxy_pass_brigade(c->bucket_alloc, r, conn, backconn,
+ header_brigade, 1)) != OK)
+ return rv;
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");
+
+ if ((rv = apr_pollset_create(&pollset, 2, p, 0)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02443)
+ "error apr_pollset_create()");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+#if 0
+ apr_socket_opt_set(sock, APR_SO_NONBLOCK, 1);
+ apr_socket_opt_set(sock, APR_SO_KEEPALIVE, 1);
+ apr_socket_opt_set(client_socket, APR_SO_NONBLOCK, 1);
+ apr_socket_opt_set(client_socket, APR_SO_KEEPALIVE, 1);
+#endif
+
+ pollfd.p = p;
+ pollfd.desc_type = APR_POLL_SOCKET;
+ pollfd.reqevents = APR_POLLIN;
+ pollfd.desc.s = sock;
+ pollfd.client_data = NULL;
+ apr_pollset_add(pollset, &pollfd);
+
+ pollfd.desc.s = client_socket;
+ apr_pollset_add(pollset, &pollfd);
+
+
+ r->output_filters = c->output_filters;
+ r->proto_output_filters = c->output_filters;
+ r->input_filters = c->input_filters;
+ r->proto_input_filters = c->input_filters;
+
+ remove_reqtimeout(r->input_filters);
+
+ while (1) { /* Infinite loop until error (one side closes the connection) */
+ if ((rv = apr_pollset_poll(pollset, -1, &pollcnt, &signalled))
+ != APR_SUCCESS) {
+ if (APR_STATUS_IS_EINTR(rv)) {
+ continue;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02444) "error apr_poll()");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02445)
+ "woke from poll(), i=%d", pollcnt);
+
+ for (pi = 0; pi < pollcnt; pi++) {
+ const apr_pollfd_t *cur = &signalled[pi];
+
+ if (cur->desc.s == sock) {
+ pollevent = cur->rtnevents;
+ if (pollevent & APR_POLLIN) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02446)
+ "sock was readable");
+ rv = proxy_wstunnel_transfer(r, backconn, c, bb, "sock");
+ }
+ else if ((pollevent & APR_POLLERR)
+ || (pollevent & APR_POLLHUP)) {
+ rv = APR_EPIPE;
+ ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(02447)
+ "err/hup on backconn");
+ }
+ if (rv != APR_SUCCESS)
+ client_error = 1;
+ }
+ else if (cur->desc.s == client_socket) {
+ pollevent = cur->rtnevents;
+ if (pollevent & APR_POLLIN) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02448)
+ "client was readable");
+ rv = proxy_wstunnel_transfer(r, c, backconn, bb, "client");
+ }
+ }
+ else {
+ rv = APR_EBADF;
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02449)
+ "unknown socket in pollset");
+ }
+
+ }
+ if (rv != APR_SUCCESS) {
+ break;
+ }
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "finished with poll() - cleaning up");
+
+ if (client_error) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ return OK;
+}
+
+/*
+ * Handle ws:// and wss:// requests: connect a backend and hand off to
+ * ap_proxy_wstunnel_request() to tunnel traffic in both directions.
+ */
+static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker,
+ proxy_server_conf *conf,
+ char *url, const char *proxyname,
+ apr_port_t proxyport)
+{
+ int status;
+ char server_portstr[32];
+ proxy_conn_rec *backend = NULL;
+ char *scheme;
+ int retry;
+ conn_rec *c = r->connection;
+ apr_pool_t *p = r->pool;
+ apr_uri_t *uri;
+
+ if (strncasecmp(url, "wss:", 4) == 0) {
+ scheme = "WSS";
+ }
+ else if (strncasecmp(url, "ws:", 3) == 0) {
+ scheme = "WS";
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02450) "declining URL %s", url);
+ return DECLINED;
+ }
+
+ uri = apr_palloc(p, sizeof(*uri));
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02451) "serving URL %s", url);
+
+ /* create space for state information */
+ status = ap_proxy_acquire_connection(scheme, &backend, worker,
+ r->server);
+ if (status != OK) {
+ if (backend) {
+ backend->close = 1;
+ ap_proxy_release_connection(scheme, backend, r->server);
+ }
+ return status;
+ }
+
+ backend->is_ssl = 0;
+ backend->close = 0;
+
+ retry = 0;
+ while (retry < 2) {
+ char *locurl = url;
+ /* Step One: Determine Who To Connect To */
+ status = ap_proxy_determine_connection(p, r, conf, worker, backend,
+ uri, &locurl, proxyname, proxyport,
+ server_portstr,
+ sizeof(server_portstr));
+
+ if (status != OK)
+ break;
+
+ /* Step Two: Make the Connection */
+ if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452)
+ "failed to make connection to backend: %s",
+ backend->hostname);
+ status = HTTP_SERVICE_UNAVAILABLE;
+ break;
+ }
+ /* Step Three: Create conn_rec */
+ if (!backend->connection) {
+ if ((status = ap_proxy_connection_create(scheme, backend,
+ c, r->server)) != OK)
+ break;
+ }
+
+        /* Step Four: Process the Request */
+ status = ap_proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl,
+ server_portstr);
+ break;
+ }
+
+ /* Do not close the socket */
+ ap_proxy_release_connection(scheme, backend, r->server);
+ return status;
+}
+
+static void ap_proxy_http_register_hook(apr_pool_t *p)
+{
+ proxy_hook_scheme_handler(proxy_wstunnel_handler, NULL, NULL, APR_HOOK_FIRST);
+ proxy_hook_canon_handler(proxy_wstunnel_canon, NULL, NULL, APR_HOOK_FIRST);
+}
+
+AP_DECLARE_MODULE(proxy_wstunnel) = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ ap_proxy_http_register_hook /* register hooks */
+};
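
For context, a minimal reverse-proxy configuration exercising the new scheme handler might look as follows; the backend host and paths are placeholders, not anything mandated by the module.

    LoadModule proxy_module          modules/mod_proxy.so
    LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so

    <VirtualHost *:80>
        ServerName www.example.com
        # ws:// is matched by proxy_wstunnel_canon/_handler above;
        # wss:// would additionally require SSLProxyEngine on.
        ProxyPass /ws/ ws://backend.example.com:8080/ws/
    </VirtualHost>
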
diff --git a/modules/proxy/mod_proxy_wstunnel.dsp b/modules/proxy/mod_proxy_wstunnel.dsp
new file mode 100644
index 00000000..7123bd65
--- /dev/null
+++ b/modules/proxy/mod_proxy_wstunnel.dsp
@@ -0,0 +1,123 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy_wstunnel" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy_wstunnel - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_wstunnel.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_wstunnel.mak" CFG="mod_proxy_wstunnel - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_wstunnel - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_wstunnel - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy_wstunnel - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Release\mod_proxy_wstunnel_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_proxy_wstunnel.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_wstunnel.so" /d LONG_NAME="proxy_wstunnel_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /out:".\Release\mod_proxy_wstunnel.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_wstunnel.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Release\mod_proxy_wstunnel.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_wstunnel.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_proxy_wstunnel.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_proxy_wstunnel - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /Fd"Debug\mod_proxy_wstunnel_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x809 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_proxy_wstunnel.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_wstunnel.so" /d LONG_NAME="proxy_wstunnel_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_proxy_wstunnel.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_wstunnel.so
+# ADD LINK32 kernel32.lib ws2_32.lib mswsock.lib /nologo /subsystem:windows /dll /incremental:no /debug /out:".\Debug\mod_proxy_wstunnel.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_wstunnel.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_proxy_wstunnel.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy_wstunnel - Win32 Release"
+# Name "mod_proxy_wstunnel - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;hpj;bat;for;f90"
+# Begin Source File
+
+SOURCE=.\mod_proxy_wstunnel.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter ".h"
+# Begin Source File
+
+SOURCE=.\mod_proxy.h
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 5ab5d914..67dc9394 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -69,6 +69,8 @@ static int lb_workers_limit = 0;
const apr_strmatch_pattern PROXY_DECLARE_DATA *ap_proxy_strmatch_path;
const apr_strmatch_pattern PROXY_DECLARE_DATA *ap_proxy_strmatch_domain;
+extern apr_global_mutex_t *proxy_mutex;
+
static int proxy_match_ipaddr(struct dirconn_entry *This, request_rec *r);
static int proxy_match_domainname(struct dirconn_entry *This, request_rec *r);
static int proxy_match_hostname(struct dirconn_entry *This, request_rec *r);
@@ -1730,12 +1732,14 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
else {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
"initializing worker %s local", worker->s->name);
+ apr_global_mutex_lock(proxy_mutex);
/* Now init local worker data */
if (worker->tmutex == NULL) {
rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00928)
"can not create worker thread mutex");
+ apr_global_mutex_unlock(proxy_mutex);
return rv;
}
}
@@ -1744,6 +1748,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
if (worker->cp == NULL) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
"can not create connection pool");
+ apr_global_mutex_unlock(proxy_mutex);
return APR_EGENERAL;
}
@@ -1779,6 +1784,8 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
"initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
getpid(), worker->s->hostname);
}
+ apr_global_mutex_unlock(proxy_mutex);
+
}
if (rv == APR_SUCCESS) {
worker->s->status |= (PROXY_WORKER_INITIALIZED);
@@ -2316,7 +2323,7 @@ static apr_status_t send_http_connect(proxy_conn_rec *backend,
nbytes = sizeof(drain_buffer) - 1;
while (status == APR_SUCCESS && nbytes) {
status = apr_socket_recv(backend->sock, drain_buffer, &nbytes);
- buffer[nbytes] = '\0';
+ drain_buffer[nbytes] = '\0';
nbytes = sizeof(drain_buffer) - 1;
if (strstr(drain_buffer, "\r\n\r\n") != NULL) {
break;
@@ -2766,15 +2773,18 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec
}
if (!found) {
proxy_worker **runtime;
+ apr_global_mutex_lock(proxy_mutex);
runtime = apr_array_push(b->workers);
*runtime = apr_palloc(conf->pool, sizeof(proxy_worker));
+ apr_global_mutex_unlock(proxy_mutex);
(*runtime)->hash = shm->hash;
(*runtime)->context = NULL;
(*runtime)->cp = NULL;
(*runtime)->balancer = b;
(*runtime)->s = shm;
(*runtime)->tmutex = NULL;
- if ((rv = ap_proxy_initialize_worker(*runtime, s, conf->pool)) != APR_SUCCESS) {
+ rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
+ if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");
return rv;
}
@@ -2834,7 +2844,365 @@ PROXY_DECLARE(proxy_balancer_shared *) ap_proxy_find_balancershm(ap_slotmem_prov
return NULL;
}
+typedef struct header_connection {
+ apr_pool_t *pool;
+ apr_array_header_t *array;
+ const char *first;
+ unsigned int closed:1;
+} header_connection;
+
+static int find_conn_headers(void *data, const char *key, const char *val)
+{
+ header_connection *x = data;
+ const char *name;
+
+ do {
+ while (*val == ',') {
+ val++;
+ }
+ name = ap_get_token(x->pool, &val, 0);
+ if (!strcasecmp(name, "close")) {
+ x->closed = 1;
+ }
+ if (!x->first) {
+ x->first = name;
+ }
+ else {
+ const char **elt;
+ if (!x->array) {
+ x->array = apr_array_make(x->pool, 4, sizeof(char *));
+ }
+ elt = apr_array_push(x->array);
+ *elt = name;
+ }
+ } while (*val);
+
+ return 1;
+}
+
+/**
+ * Remove all headers referred to by the Connection header.
+ */
+static int ap_proxy_clear_connection(request_rec *r, apr_table_t *headers)
+{
+ const char **name;
+ header_connection x;
+
+ x.pool = r->pool;
+ x.array = NULL;
+ x.first = NULL;
+ x.closed = 0;
+
+ apr_table_unset(headers, "Proxy-Connection");
+
+ apr_table_do(find_conn_headers, &x, headers, "Connection", NULL);
+ if (x.first) {
+ /* fast path - no memory allocated for one header */
+ apr_table_unset(headers, "Connection");
+ apr_table_unset(headers, x.first);
+ }
+ if (x.array) {
+ /* two or more headers */
+ while ((name = apr_array_pop(x.array))) {
+ apr_table_unset(headers, *name);
+ }
+ }
+
+ return x.closed;
+}
+
+PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
+ apr_bucket_brigade *header_brigade,
+ request_rec *r,
+ proxy_conn_rec *p_conn,
+ proxy_worker *worker,
+ proxy_server_conf *conf,
+ apr_uri_t *uri,
+ char *url, char *server_portstr,
+ char **old_cl_val,
+ char **old_te_val)
+{
+ conn_rec *c = r->connection;
+ int counter;
+ char *buf;
+ const apr_array_header_t *headers_in_array;
+ const apr_table_entry_t *headers_in;
+ apr_table_t *headers_in_copy;
+ apr_bucket *e;
+ int do_100_continue;
+ conn_rec *origin = p_conn->connection;
+ proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+
+ /*
+ * To be compliant, we only use 100-Continue for requests with bodies.
+ * We also make sure we won't be talking HTTP/1.0 as well.
+ */
+ do_100_continue = (worker->s->ping_timeout_set
+ && ap_request_has_body(r)
+ && (PROXYREQ_REVERSE == r->proxyreq)
+ && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")));
+
+ if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
+ /*
+ * According to RFC 2616 8.2.3 we are not allowed to forward an
+ * Expect: 100-continue to an HTTP/1.0 server. Instead we MUST return
+ * a HTTP_EXPECTATION_FAILED
+ */
+ if (r->expecting_100) {
+ return HTTP_EXPECTATION_FAILED;
+ }
+ buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.0" CRLF, NULL);
+ p_conn->close = 1;
+ } else {
+ buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL);
+ }
+ if (apr_table_get(r->subprocess_env, "proxy-nokeepalive")) {
+ origin->keepalive = AP_CONN_CLOSE;
+ p_conn->close = 1;
+ }
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ if (dconf->preserve_host == 0) {
+ if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
+ if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
+ buf = apr_pstrcat(p, "Host: [", uri->hostname, "]:",
+ uri->port_str, CRLF, NULL);
+ } else {
+ buf = apr_pstrcat(p, "Host: [", uri->hostname, "]", CRLF, NULL);
+ }
+ } else {
+ if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
+ buf = apr_pstrcat(p, "Host: ", uri->hostname, ":",
+ uri->port_str, CRLF, NULL);
+ } else {
+ buf = apr_pstrcat(p, "Host: ", uri->hostname, CRLF, NULL);
+ }
+ }
+ }
+ else {
+ /* don't want to use r->hostname, as the incoming header might have a
+ * port attached
+ */
+ const char* hostname = apr_table_get(r->headers_in,"Host");
+ if (!hostname) {
+ hostname = r->server->server_hostname;
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092)
+ "no HTTP 0.9 request (with no host line) "
+ "on incoming request and preserve host set "
+ "forcing hostname to be %s for uri %s",
+ hostname, r->uri);
+ }
+ buf = apr_pstrcat(p, "Host: ", hostname, CRLF, NULL);
+ }
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+
+ /* handle Via */
+ if (conf->viaopt == via_block) {
+ /* Block all outgoing Via: headers */
+ apr_table_unset(r->headers_in, "Via");
+ } else if (conf->viaopt != via_off) {
+ const char *server_name = ap_get_server_name(r);
+ /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
+ * then the server name returned by ap_get_server_name() is the
+         * origin server name (which does not make much sense with Via: headers)
+ * so we use the proxy vhost's name instead.
+ */
+ if (server_name == r->hostname)
+ server_name = r->server->server_hostname;
+ /* Create a "Via:" request header entry and merge it */
+ /* Generate outgoing Via: header with/without server comment: */
+ apr_table_mergen(r->headers_in, "Via",
+ (conf->viaopt == via_full)
+ ? apr_psprintf(p, "%d.%d %s%s (%s)",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, server_portstr,
+ AP_SERVER_BASEVERSION)
+ : apr_psprintf(p, "%d.%d %s%s",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, server_portstr)
+ );
+ }
+
+ /* Use HTTP/1.1 100-Continue as quick "HTTP ping" test
+ * to backend
+ */
+ if (do_100_continue) {
+ apr_table_mergen(r->headers_in, "Expect", "100-Continue");
+ r->expecting_100 = 1;
+ }
+
+ /* X-Forwarded-*: handling
+ *
+ * XXX Privacy Note:
+ * -----------------
+ *
+ * These request headers are only really useful when the mod_proxy
+ * is used in a reverse proxy configuration, so that useful info
+ * about the client can be passed through the reverse proxy and on
+ * to the backend server, which may require the information to
+ * function properly.
+ *
+ * In a forward proxy situation, these options are a potential
+ * privacy violation, as information about clients behind the proxy
+ * are revealed to arbitrary servers out there on the internet.
+ *
+ * The HTTP/1.1 Via: header is designed for passing client
+ * information through proxies to a server, and should be used in
+     * a forward proxy configuration instead of X-Forwarded-*. See the
+ * ProxyVia option for details.
+ */
+ if (dconf->add_forwarded_headers) {
+ if (PROXYREQ_REVERSE == r->proxyreq) {
+ const char *buf;
+
+ /* Add X-Forwarded-For: so that the upstream has a chance to
+             * determine where the original request came from.
+ */
+ apr_table_mergen(r->headers_in, "X-Forwarded-For",
+ r->useragent_ip);
+
+ /* Add X-Forwarded-Host: so that upstream knows what the
+ * original request hostname was.
+ */
+ if ((buf = apr_table_get(r->headers_in, "Host"))) {
+ apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
+ }
+
+ /* Add X-Forwarded-Server: so that upstream knows what the
+             * name of this proxy server is (if there is more than one)
+ * XXX: This duplicates Via: - do we strictly need it?
+ */
+ apr_table_mergen(r->headers_in, "X-Forwarded-Server",
+ r->server->server_hostname);
+ }
+ }
+
+ proxy_run_fixups(r);
+ /*
+ * Make a copy of the headers_in table before clearing the connection
+ * headers as we need the connection headers later in the http output
+ * filter to prepare the correct response headers.
+ *
+ * Note: We need to take r->pool for apr_table_copy as the key / value
+ * pairs in r->headers_in have been created out of r->pool and
+ * p might be (and actually is) a longer living pool.
+ * This would trigger the bad pool ancestry abort in apr_table_copy if
+ * apr is compiled with APR_POOL_DEBUG.
+ */
+ headers_in_copy = apr_table_copy(r->pool, r->headers_in);
+ ap_proxy_clear_connection(r, headers_in_copy);
+ /* send request headers */
+ headers_in_array = apr_table_elts(headers_in_copy);
+ headers_in = (const apr_table_entry_t *) headers_in_array->elts;
+ for (counter = 0; counter < headers_in_array->nelts; counter++) {
+ if (headers_in[counter].key == NULL
+ || headers_in[counter].val == NULL
+
+ /* Already sent */
+ || !strcasecmp(headers_in[counter].key, "Host")
+
+ /* Clear out hop-by-hop request headers not to send
+ * RFC2616 13.5.1 says we should strip these headers
+ */
+ || !strcasecmp(headers_in[counter].key, "Keep-Alive")
+ || !strcasecmp(headers_in[counter].key, "TE")
+ || !strcasecmp(headers_in[counter].key, "Trailer")
+ || !strcasecmp(headers_in[counter].key, "Upgrade")
+
+ ) {
+ continue;
+ }
+ /* Do we want to strip Proxy-Authorization ?
+ * If we haven't used it, then NO
+ * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
+ * So let's make it configurable by env.
+ */
+ if (!strcasecmp(headers_in[counter].key,"Proxy-Authorization")) {
+ if (r->user != NULL) { /* we've authenticated */
+ if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
+ continue;
+ }
+ }
+ }
+
+ /* Skip Transfer-Encoding and Content-Length for now.
+ */
+ if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) {
+ *old_te_val = headers_in[counter].val;
+ continue;
+ }
+ if (!strcasecmp(headers_in[counter].key, "Content-Length")) {
+ *old_cl_val = headers_in[counter].val;
+ continue;
+ }
+
+ /* for sub-requests, ignore freshness/expiry headers */
+ if (r->main) {
+ if ( !strcasecmp(headers_in[counter].key, "If-Match")
+ || !strcasecmp(headers_in[counter].key, "If-Modified-Since")
+ || !strcasecmp(headers_in[counter].key, "If-Range")
+ || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since")
+ || !strcasecmp(headers_in[counter].key, "If-None-Match")) {
+ continue;
+ }
+ }
+
+ buf = apr_pstrcat(p, headers_in[counter].key, ": ",
+ headers_in[counter].val, CRLF,
+ NULL);
+ ap_xlate_proto_to_ascii(buf, strlen(buf));
+ e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+ }
+ return OK;
+}
+
+PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+ request_rec *r, proxy_conn_rec *p_conn,
+ conn_rec *origin, apr_bucket_brigade *bb,
+ int flush)
+{
+ apr_status_t status;
+ apr_off_t transferred;
+
+ if (flush) {
+ apr_bucket *e = apr_bucket_flush_create(bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+ apr_brigade_length(bb, 0, &transferred);
+ if (transferred != -1)
+ p_conn->worker->s->transferred += transferred;
+ status = ap_pass_brigade(origin->output_filters, bb);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01084)
+ "pass request body failed to %pI (%s)",
+ p_conn->addr, p_conn->hostname);
+ if (origin->aborted) {
+ const char *ssl_note;
+
+ if (((ssl_note = apr_table_get(origin->notes, "SSL_connect_rv"))
+ != NULL) && (strcmp(ssl_note, "err") == 0)) {
+ return ap_proxyerror(r, HTTP_INTERNAL_SERVER_ERROR,
+ "Error during SSL Handshake with"
+ " remote server");
+ }
+ return APR_STATUS_IS_TIMEUP(status) ? HTTP_GATEWAY_TIME_OUT : HTTP_BAD_GATEWAY;
+ }
+ else {
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ apr_brigade_cleanup(bb);
+ return OK;
+}
+
void proxy_util_register_hooks(apr_pool_t *p)
{
APR_REGISTER_OPTIONAL_FN(ap_proxy_retry_worker);
+ APR_REGISTER_OPTIONAL_FN(ap_proxy_clear_connection);
}
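
To illustrate what the new ap_proxy_clear_connection() does with a Connection header that lists several tokens, here is a small sketch. The header names and values are made up, and the function is assumed to be reached through the optional-function pointer retrieved at post_config (as mod_proxy_http does above).

    static void strip_hop_by_hop_example(request_rec *r,
            APR_OPTIONAL_FN_TYPE(ap_proxy_clear_connection) *clear_fn)
    {
        apr_table_t *hdrs = apr_table_make(r->pool, 8);
        int close_requested;

        apr_table_setn(hdrs, "Host", "backend.example.com");
        apr_table_setn(hdrs, "Connection", "close, X-Debug-Token");
        apr_table_setn(hdrs, "X-Debug-Token", "abc123");        /* made up */
        apr_table_setn(hdrs, "Proxy-Connection", "keep-alive");

        close_requested = clear_fn(r, hdrs);
        /* Connection, X-Debug-Token and Proxy-Connection are now unset,
         * Host is untouched, and close_requested == 1 because "close"
         * was listed in the Connection header. */
        (void)close_requested;
    }
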
diff --git a/modules/session/NWGNUmakefile b/modules/session/NWGNUmakefile
index 8adaac59..85f3a311 100644
--- a/modules/session/NWGNUmakefile
+++ b/modules/session/NWGNUmakefile
@@ -18,9 +18,11 @@ include $(AP_WORK)/build/NWGNUhead.inc
#
# Make sure all needed macro's are defined
#
-ifneq ($(MAKECMDGOALS),clean)
+ifneq "$(MAKECMDGOALS)" "clean"
+ifneq "$(findstring clobber_,$(MAKECMDGOALS))" "clobber_"
APU_HAVE_CRYPTO = $(shell $(AWK) '/^\#define APU_HAVE_CRYPTO/{print $$3}' $(APRUTIL)/include/apu.h)
endif
+endif
#
# These directories will be at the beginning of the include list, followed by
diff --git a/modules/session/mod_session.c b/modules/session/mod_session.c
index a3354a59..7213eb3c 100644
--- a/modules/session/mod_session.c
+++ b/modules/session/mod_session.c
@@ -132,8 +132,6 @@ static apr_status_t ap_session_load(request_rec * r, session_rec ** z)
zz = (session_rec *) apr_pcalloc(r->pool, sizeof(session_rec));
zz->pool = r->pool;
zz->entries = apr_table_make(zz->pool, 10);
- zz->uuid = (apr_uuid_t *) apr_pcalloc(zz->pool, sizeof(apr_uuid_t));
- apr_uuid_get(zz->uuid);
}
else {
@@ -446,6 +444,7 @@ static apr_status_t session_output_filter(ap_filter_t * f,
}
if (override) {
z->encoded = override;
+ z->dirty = 1;
session_identity_decode(r, z);
}
}
diff --git a/modules/session/mod_session_cookie.c b/modules/session/mod_session_cookie.c
index 15b3d9c6..6a02322b 100644
--- a/modules/session/mod_session_cookie.c
+++ b/modules/session/mod_session_cookie.c
@@ -157,7 +157,6 @@ static apr_status_t session_cookie_load(request_rec * r, session_rec ** z)
zz->pool = m->pool;
zz->entries = apr_table_make(m->pool, 10);
zz->encoded = val;
- zz->uuid = (apr_uuid_t *) apr_pcalloc(m->pool, sizeof(apr_uuid_t));
*z = zz;
/* put the session in the notes so we don't have to parse it again */
diff --git a/modules/session/mod_session_crypto.c b/modules/session/mod_session_crypto.c
index f48ff038..03dbba61 100644
--- a/modules/session/mod_session_crypto.c
+++ b/modules/session/mod_session_crypto.c
@@ -556,9 +556,8 @@ static const char *set_crypto_passphrase_file(cmd_parms *cmd, void *config,
filename = ap_server_root_relative(cmd->temp_pool, filename);
rv = ap_pcfg_openfile(&file, cmd->temp_pool, filename);
if (rv != APR_SUCCESS) {
- return apr_psprintf(cmd->pool, "%s: Could not open file %s: %s",
- cmd->cmd->name, filename,
- apr_strerror(rv, buffer, sizeof(buffer)));
+ return apr_psprintf(cmd->pool, "%s: Could not open file %s: %pm",
+ cmd->cmd->name, filename, &rv);
}
while (!(ap_cfg_getline(buffer, sizeof(buffer), file))) {
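
The change above swaps an explicit apr_strerror() buffer for APR's %pm conversion, which formats the apr_status_t that the corresponding argument points to. A tiny sketch, assuming a pool named pool is in scope:

    apr_status_t rv = APR_ENOENT;            /* any failing status code */
    char *msg = apr_psprintf(pool, "could not open %s: %pm",
                             "conf/passphrase.txt", &rv);
    /* msg now carries the human-readable error text for rv, with no
     * caller-supplied apr_strerror() buffer needed. */
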
diff --git a/modules/session/mod_session_dbd.c b/modules/session/mod_session_dbd.c
index d6349a8d..a6ab40ea 100644
--- a/modules/session/mod_session_dbd.c
+++ b/modules/session/mod_session_dbd.c
@@ -230,12 +230,11 @@ static apr_status_t session_dbd_load(request_rec * r, session_rec ** z)
zz = (session_rec *) apr_pcalloc(r->pool, sizeof(session_rec));
zz->pool = r->pool;
zz->entries = apr_table_make(zz->pool, 10);
- zz->uuid = (apr_uuid_t *) apr_pcalloc(zz->pool, sizeof(apr_uuid_t));
- if (key) {
- apr_uuid_parse(zz->uuid, key);
- }
- else {
- apr_uuid_get(zz->uuid);
+ if (key && val) {
+ apr_uuid_t *uuid = apr_pcalloc(zz->pool, sizeof(apr_uuid_t));
+ if (APR_SUCCESS == apr_uuid_parse(uuid, key)) {
+ zz->uuid = uuid;
+ }
}
zz->encoded = val;
*z = zz;
@@ -250,8 +249,8 @@ static apr_status_t session_dbd_load(request_rec * r, session_rec ** z)
/**
* Save the session by the key specified.
*/
-static apr_status_t dbd_save(request_rec * r, const char *key, const char *val,
- apr_int64_t expiry)
+static apr_status_t dbd_save(request_rec * r, const char *oldkey,
+ const char *newkey, const char *val, apr_int64_t expiry)
{
apr_status_t rv;
@@ -272,22 +271,24 @@ static apr_status_t dbd_save(request_rec * r, const char *key, const char *val,
if (rv) {
return rv;
}
- rv = apr_dbd_pvbquery(dbd->driver, r->pool, dbd->handle, &rows, statement,
- val, &expiry, key, NULL);
- if (rv) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01857)
- "query execution error updating session '%s' "
- "using database query '%s': %s", key, conf->updatelabel,
- apr_dbd_error(dbd->driver, dbd->handle, rv));
- return APR_EGENERAL;
- }
- /*
- * if some rows were updated it means a session existed and was updated,
- * so we are done.
- */
- if (rows != 0) {
- return APR_SUCCESS;
+ if (oldkey) {
+ rv = apr_dbd_pvbquery(dbd->driver, r->pool, dbd->handle, &rows,
+ statement, val, &expiry, newkey, oldkey, NULL);
+ if (rv) {
+            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01857)
+                          "query execution error updating session '%s' "
+                          "to '%s' using database query '%s': %s",
+                          oldkey, newkey, conf->updatelabel,
+                          apr_dbd_error(dbd->driver, dbd->handle, rv));
+ return APR_EGENERAL;
+ }
+
+ /*
+ * if some rows were updated it means a session existed and was updated,
+ * so we are done.
+ */
+ if (rows != 0) {
+ return APR_SUCCESS;
+ }
}
if (conf->insertlabel == NULL) {
@@ -301,11 +302,11 @@ static apr_status_t dbd_save(request_rec * r, const char *key, const char *val,
return rv;
}
rv = apr_dbd_pvbquery(dbd->driver, r->pool, dbd->handle, &rows, statement,
- val, &expiry, key, NULL);
+ val, &expiry, newkey, NULL);
if (rv) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01859)
"query execution error inserting session '%s' "
- "in database with '%s': %s", key, conf->insertlabel,
+ "in database with '%s': %s", newkey, conf->insertlabel,
apr_dbd_error(dbd->driver, dbd->handle, rv));
return APR_EGENERAL;
}
@@ -320,7 +321,7 @@ static apr_status_t dbd_save(request_rec * r, const char *key, const char *val,
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01860)
"the session insert query did not cause any rows to be added "
- "to the database for session '%s', session not inserted", key);
+ "to the database for session '%s', session not inserted", newkey);
return APR_EGENERAL;
@@ -397,27 +398,38 @@ static apr_status_t dbd_clean(apr_pool_t *p, server_rec *s)
static apr_status_t session_dbd_save(request_rec * r, session_rec * z)
{
- char *buffer;
apr_status_t ret = APR_SUCCESS;
session_dbd_dir_conf *conf = ap_get_module_config(r->per_dir_config,
&session_dbd_module);
/* support anonymous sessions */
if (conf->name_set || conf->name2_set) {
+ char *oldkey = NULL, *newkey = NULL;
/* don't cache pages with a session */
apr_table_addn(r->headers_out, "Cache-Control", "no-cache");
- /* must we create a uuid? */
- buffer = apr_pcalloc(r->pool, APR_UUID_FORMATTED_LENGTH + 1);
- apr_uuid_format(buffer, z->uuid);
+ /* if the session is new or changed, make a new session ID */
+ if (z->uuid) {
+ oldkey = apr_pcalloc(r->pool, APR_UUID_FORMATTED_LENGTH + 1);
+ apr_uuid_format(oldkey, z->uuid);
+ }
+ if (z->dirty || !oldkey) {
+ z->uuid = apr_pcalloc(z->pool, sizeof(apr_uuid_t));
+ apr_uuid_get(z->uuid);
+ newkey = apr_pcalloc(r->pool, APR_UUID_FORMATTED_LENGTH + 1);
+ apr_uuid_format(newkey, z->uuid);
+ }
+ else {
+ newkey = oldkey;
+ }
/* save the session with the uuid as key */
if (z->encoded && z->encoded[0]) {
- ret = dbd_save(r, buffer, z->encoded, z->expiry);
+ ret = dbd_save(r, oldkey, newkey, z->encoded, z->expiry);
}
else {
- ret = dbd_remove(r, buffer);
+ ret = dbd_remove(r, oldkey);
}
if (ret != APR_SUCCESS) {
return ret;
@@ -425,13 +437,13 @@ static apr_status_t session_dbd_save(request_rec * r, session_rec * z)
/* create RFC2109 compliant cookie */
if (conf->name_set) {
- ap_cookie_write(r, conf->name, buffer, conf->name_attrs, z->maxage,
+ ap_cookie_write(r, conf->name, newkey, conf->name_attrs, z->maxage,
r->headers_out, r->err_headers_out, NULL);
}
/* create RFC2965 compliant cookie */
if (conf->name2_set) {
- ap_cookie_write2(r, conf->name2, buffer, conf->name2_attrs, z->maxage,
+ ap_cookie_write2(r, conf->name2, newkey, conf->name2_attrs, z->maxage,
r->headers_out, r->err_headers_out, NULL);
}
@@ -446,7 +458,7 @@ static apr_status_t session_dbd_save(request_rec * r, session_rec * z)
apr_table_addn(r->headers_out, "Cache-Control", "no-cache");
if (r->user) {
- ret = dbd_save(r, r->user, z->encoded, z->expiry);
+ ret = dbd_save(r, r->user, r->user, z->encoded, z->expiry);
if (ret != APR_SUCCESS) {
return ret;
}
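
dbd_save() now binds (value, expiry, new key, old key) for the update and (value, expiry, new key) for the insert, so the prepared statements behind the configured labels must match that order. An illustrative configuration follows; the SQL, table, column and label names are assumptions for the sketch, not necessarily the module's documented defaults.

    DBDriver      sqlite3
    DBDParams     "/var/www/session.db"

    # Bind order must match dbd_save(): value, expiry, new key [, old key]
    DBDPrepareSQL "UPDATE session SET value = %s, expiry = %lld, key = %s WHERE key = %s" updatesession
    DBDPrepareSQL "INSERT INTO session (value, expiry, key) VALUES (%s, %lld, %s)" insertsession

    Session              on
    SessionDBDCookieName session path=/
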
diff --git a/modules/slotmem/mod_slotmem_shm.c b/modules/slotmem/mod_slotmem_shm.c
index 2edcea7f..aac96e23 100644
--- a/modules/slotmem/mod_slotmem_shm.c
+++ b/modules/slotmem/mod_slotmem_shm.c
@@ -91,7 +91,7 @@ static apr_status_t unixd_set_shm_perms(const char *fname)
{
#ifdef AP_NEED_SET_MUTEX_PERMS
#if APR_USE_SHMEM_SHMGET || APR_USE_SHMEM_SHMGET_ANON
- struct shmid_ds shmbuf;
+ struct shmid_ds shmbuf = { { 0 } };
key_t shmkey;
int shmid;
@@ -647,7 +647,7 @@ static apr_status_t slotmem_fgrab(ap_slotmem_instance_t *slot, unsigned int id)
}
if (id >= slot->desc.num) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02236)
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02397)
"slotmem(%s) fgrab failed. Num %u/num_free %u",
slot->name, slotmem_num_slots(slot),
slotmem_num_free_slots(slot));
diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c
index 12d473ba..a43f656e 100644
--- a/modules/ssl/mod_ssl.c
+++ b/modules/ssl/mod_ssl.c
@@ -200,9 +200,12 @@ static const command_rec ssl_config_cmds[] = {
"of the client certificate "
"(`/path/to/file' - PEM encoded certificates)")
SSL_CMD_SRV(ProxyCheckPeerExpire, FLAG,
- "SSL Proxy: check the peers certificate expiration date")
+ "SSL Proxy: check the peer certificate's expiration date")
SSL_CMD_SRV(ProxyCheckPeerCN, FLAG,
- "SSL Proxy: check the peers certificate CN")
+ "SSL Proxy: check the peer certificate's CN")
+ SSL_CMD_SRV(ProxyCheckPeerName, FLAG,
+ "SSL Proxy: check the peer certificate's name "
+                "(must be present in subjectAltName extension or CN)")
/*
* Per-directory context configuration directives
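
A short sketch of how the new SSLProxyCheckPeerName directive might be used in a reverse-proxy virtual host; host names, certificate paths and URLs are placeholders.

    <VirtualHost *:443>
        ServerName www.example.com
        SSLEngine                 on
        SSLProxyEngine            on
        SSLProxyVerify            require
        SSLProxyCACertificateFile conf/proxy-ca.crt
        SSLProxyCheckPeerName     on

        ProxyPass /app/ https://backend.internal.example.com/app/
    </VirtualHost>
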
diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c
index 658ef6b3..1ab5bf37 100644
--- a/modules/ssl/ssl_engine_config.c
+++ b/modules/ssl/ssl_engine_config.c
@@ -207,6 +207,7 @@ static SSLSrvConfigRec *ssl_config_server_new(apr_pool_t *p)
sc->insecure_reneg = UNSET;
sc->proxy_ssl_check_peer_expire = SSL_ENABLED_UNSET;
sc->proxy_ssl_check_peer_cn = SSL_ENABLED_UNSET;
+ sc->proxy_ssl_check_peer_name = SSL_ENABLED_UNSET;
#ifndef OPENSSL_NO_TLSEXT
sc->strict_sni_vhost_check = SSL_ENABLED_UNSET;
#endif
@@ -336,6 +337,7 @@ void *ssl_config_server_merge(apr_pool_t *p, void *basev, void *addv)
cfgMergeBool(insecure_reneg);
cfgMerge(proxy_ssl_check_peer_expire, SSL_ENABLED_UNSET);
cfgMerge(proxy_ssl_check_peer_cn, SSL_ENABLED_UNSET);
+ cfgMerge(proxy_ssl_check_peer_name, SSL_ENABLED_UNSET);
#ifndef OPENSSL_NO_TLSEXT
cfgMerge(strict_sni_vhost_check, SSL_ENABLED_UNSET);
#endif
@@ -1632,6 +1634,15 @@ const char *ssl_cmd_SSLProxyCheckPeerCN(cmd_parms *cmd, void *dcfg, int flag)
return NULL;
}
+const char *ssl_cmd_SSLProxyCheckPeerName(cmd_parms *cmd, void *dcfg, int flag)
+{
+ SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
+
+ sc->proxy_ssl_check_peer_name = flag ? SSL_ENABLED_TRUE : SSL_ENABLED_FALSE;
+
+ return NULL;
+}
+
const char *ssl_cmd_SSLStrictSNIVHostCheck(cmd_parms *cmd, void *dcfg, int flag)
{
#ifndef OPENSSL_NO_TLSEXT
diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c
index be891d62..9f8dcc8f 100644
--- a/modules/ssl/ssl_engine_init.c
+++ b/modules/ssl/ssl_engine_init.c
@@ -91,7 +91,7 @@ static int ssl_tmp_key_init_rsa(server_rec *s,
if (FIPS_mode() && bits < 1024) {
mc->pTmpKeys[idx] = NULL;
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(01877)
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01877)
"Init: Skipping generating temporary "
"%d bit RSA private key in FIPS mode", bits);
return OK;
@@ -140,7 +140,7 @@ static int ssl_tmp_key_init_dh(server_rec *s,
if (FIPS_mode() && bits < 1024) {
mc->pTmpKeys[idx] = NULL;
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(01880)
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01880)
"Init: Skipping generating temporary "
"%d bit DH parameters in FIPS mode", bits);
return OK;
@@ -354,7 +354,7 @@ int ssl_init_Module(apr_pool_t *p, apr_pool_t *plog,
}
}
else {
- ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, APLOGNO(01886)
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01886)
"SSL FIPS mode disabled");
}
#endif
@@ -1110,7 +1110,6 @@ static void ssl_check_public_cert(server_rec *s,
int type)
{
int is_ca, pathlen;
- apr_array_header_t *ids;
if (!cert) {
return;
@@ -1143,56 +1142,12 @@ static void ssl_check_public_cert(server_rec *s,
}
}
- /*
- * Check if the server name is covered by the certificate.
- * Consider both dNSName entries in the subjectAltName extension
- * and, as a fallback, commonName attributes in the subject DN.
- * (DNS-IDs and CN-IDs as defined in RFC 6125).
- */
- if (SSL_X509_getIDs(ptemp, cert, &ids)) {
- char *cp;
- int i;
- char **id = (char **)ids->elts;
- BOOL is_wildcard, matched = FALSE;
-
- for (i = 0; i < ids->nelts; i++) {
- if (!id[i])
- continue;
-
- /*
- * Determine if it is a wildcard ID - we're restrictive
- * in the sense that we require the wildcard character to be
- * THE left-most label (i.e., the ID must start with "*.")
- */
- is_wildcard = (*id[i] == '*' && *(id[i]+1) == '.') ? TRUE : FALSE;
-
- /*
- * If the ID includes a wildcard character, check if it matches
- * for the left-most DNS label (i.e., the wildcard character
- * is not allowed to match a dot). Otherwise, try a simple
- * string compare, case insensitively.
- */
- if ((is_wildcard == TRUE &&
- (cp = strchr(s->server_hostname, '.')) &&
- !strcasecmp(id[i]+1, cp)) ||
- !strcasecmp(id[i], s->server_hostname)) {
- matched = TRUE;
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01908)
- "%sID '%s' in %s certificate configured "
- "for %s matches server name",
- is_wildcard ? "Wildcard " : "",
- id[i], ssl_asn1_keystr(type),
- (mySrvConfig(s))->vhost_id);
- break;
- }
- }
-
- if (matched == FALSE) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(01909)
- "%s certificate configured for %s does NOT include "
- "an ID which matches the server name",
- ssl_asn1_keystr(type), (mySrvConfig(s))->vhost_id);
- }
+ if (SSL_X509_match_name(ptemp, cert, (const char *)s->server_hostname,
+ TRUE, s) == FALSE) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(01909)
+ "%s certificate configured for %s does NOT include "
+ "an ID which matches the server name",
+ ssl_asn1_keystr(type), (mySrvConfig(s))->vhost_id);
}
}
@@ -1354,7 +1309,8 @@ static void ssl_init_proxy_certs(server_rec *s,
for (n = 0; n < ncerts; n++) {
X509_INFO *inf = sk_X509_INFO_value(sk, n);
- if (!inf->x509 || !inf->x_pkey) {
+ if (!inf->x509 || !inf->x_pkey || !inf->x_pkey->dec_pkey ||
+ inf->enc_data) {
sk_X509_INFO_free(sk);
ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, s, APLOGNO(02252)
"incomplete client cert configured for SSL proxy "
@@ -1362,6 +1318,15 @@ static void ssl_init_proxy_certs(server_rec *s,
ssl_die(s);
return;
}
+
+ if (X509_check_private_key(inf->x509, inf->x_pkey->dec_pkey) != 1) {
+ ssl_log_xerror(SSLLOG_MARK, APLOG_STARTUP, 0, ptemp, s, inf->x509,
+ APLOGNO(02326) "proxy client certificate and "
+ "private key do not match");
+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, s);
+ ssl_die(s);
+ return;
+ }
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02207)
@@ -1374,7 +1339,11 @@ static void ssl_init_proxy_certs(server_rec *s,
return;
}
- /* Load all of the CA certs and construct a chain */
+ /* If SSLProxyMachineCertificateChainFile is configured, load all
+ * the CA certs and have OpenSSL attempt to construct a full chain
+ * from each configured end-entity cert up to a root. This will
+ * allow selection of the correct cert given a list of root CA
+ * names in the certificate request from the server. */
pkp->ca_certs = (STACK_OF(X509) **) apr_pcalloc(p, ncerts * sizeof(sk));
sctx = X509_STORE_CTX_new();
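
The added check mirrors the standard OpenSSL pairing test between a certificate and its private key. A minimal standalone sketch of the same test (hypothetical helper name, assuming only the OpenSSL headers) would be:

    #include <openssl/evp.h>
    #include <openssl/x509.h>

    /* Hypothetical helper: return 1 if cert and key belong together,
     * 0 otherwise.  X509_check_private_key() compares the public key
     * embedded in the certificate against the supplied private key,
     * which is the same test the hunk above adds for each configured
     * proxy client certificate. */
    static int cert_key_consistent(X509 *cert, EVP_PKEY *pkey)
    {
        if (cert == NULL || pkey == NULL)
            return 0;
        return X509_check_private_key(cert, pkey) == 1;
    }

The chain construction that follows (pkp->ca_certs, X509_STORE_CTX_new()) is unchanged apart from the expanded comment.
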
diff --git a/modules/ssl/ssl_engine_io.c b/modules/ssl/ssl_engine_io.c
index 83f3ab7f..d135bd38 100644
--- a/modules/ssl/ssl_engine_io.c
+++ b/modules/ssl/ssl_engine_io.c
@@ -1053,6 +1053,7 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx)
#endif
const char *hostname_note = apr_table_get(c->notes,
"proxy-request-hostname");
+ BOOL proxy_ssl_check_peer_ok = TRUE;
sc = mySrvConfig(server);
#ifndef OPENSSL_NO_TLSEXT
@@ -1090,26 +1091,32 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx)
return MODSSL_ERROR_BAD_GATEWAY;
}
+ cert = SSL_get_peer_certificate(filter_ctx->pssl);
+
if (sc->proxy_ssl_check_peer_expire != SSL_ENABLED_FALSE) {
- cert = SSL_get_peer_certificate(filter_ctx->pssl);
if (!cert
|| (X509_cmp_current_time(
X509_get_notBefore(cert)) >= 0)
|| (X509_cmp_current_time(
X509_get_notAfter(cert)) <= 0)) {
+ proxy_ssl_check_peer_ok = FALSE;
ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02004)
"SSL Proxy: Peer certificate is expired");
- if (cert) {
- X509_free(cert);
- }
- /* ensure that the SSL structures etc are freed, etc: */
- ssl_filter_io_shutdown(filter_ctx, c, 1);
- apr_table_setn(c->notes, "SSL_connect_rv", "err");
- return HTTP_BAD_GATEWAY;
}
- X509_free(cert);
}
- if ((sc->proxy_ssl_check_peer_cn != SSL_ENABLED_FALSE) &&
+ if ((sc->proxy_ssl_check_peer_name != SSL_ENABLED_FALSE) &&
+ hostname_note) {
+ apr_table_unset(c->notes, "proxy-request-hostname");
+ if (!cert
+ || SSL_X509_match_name(c->pool, cert, hostname_note,
+ TRUE, server) == FALSE) {
+ proxy_ssl_check_peer_ok = FALSE;
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02411)
+ "SSL Proxy: Peer certificate does not match "
+ "for hostname %s", hostname_note);
+ }
+ }
+ else if ((sc->proxy_ssl_check_peer_cn != SSL_ENABLED_FALSE) &&
hostname_note) {
const char *hostname;
int match = 0;
@@ -1128,17 +1135,25 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx)
}
if (!match) {
+ proxy_ssl_check_peer_ok = FALSE;
ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, APLOGNO(02005)
"SSL Proxy: Peer certificate CN mismatch:"
" Certificate CN: %s Requested hostname: %s",
hostname, hostname_note);
- /* ensure that the SSL structures etc are freed, etc: */
- ssl_filter_io_shutdown(filter_ctx, c, 1);
- apr_table_setn(c->notes, "SSL_connect_rv", "err");
- return HTTP_BAD_GATEWAY;
}
}
+ if (cert) {
+ X509_free(cert);
+ }
+
+ if (proxy_ssl_check_peer_ok != TRUE) {
+ /* ensure that the SSL structures etc are freed, etc: */
+ ssl_filter_io_shutdown(filter_ctx, c, 1);
+ apr_table_setn(c->notes, "SSL_connect_rv", "err");
+ return HTTP_BAD_GATEWAY;
+ }
+
apr_table_setn(c->notes, "SSL_connect_rv", "ok");
return APR_SUCCESS;
}
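
The restructuring above replaces two early-return error paths with a single cleanup point: each peer check only records a failure in proxy_ssl_check_peer_ok, the peer certificate is freed exactly once, and the connection is torn down in one place. A condensed sketch of that control flow (illustrative only: hypothetical function name, per-check enable flags and logging omitted; assumes mod_ssl's ssl_private.h for BOOL/TRUE/FALSE and the SSL_X509_match_name() helper introduced in ssl_util_ssl.c below):

    /* Returns 1 if the peer passes both checks, 0 if the caller
     * should shut the connection down. */
    static int proxy_peer_ok(SSL *ssl, const char *hostname,
                             apr_pool_t *p, server_rec *s)
    {
        int ok = 1;
        X509 *cert = SSL_get_peer_certificate(ssl);

        /* expiry: notBefore must lie in the past, notAfter in the future */
        if (!cert
            || X509_cmp_current_time(X509_get_notBefore(cert)) >= 0
            || X509_cmp_current_time(X509_get_notAfter(cert)) <= 0)
            ok = 0;                          /* log and continue */

        /* name: DNS-ID/CN-ID match, wildcards allowed */
        if (!cert
            || SSL_X509_match_name(p, cert, hostname, TRUE, s) == FALSE)
            ok = 0;                          /* log and continue */

        if (cert)
            X509_free(cert);                 /* freed exactly once */

        return ok;                           /* single teardown path */
    }
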
diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h
index a9269680..080ecef3 100644
--- a/modules/ssl/ssl_private.h
+++ b/modules/ssl/ssl_private.h
@@ -687,6 +687,7 @@ struct SSLSrvConfigRec {
modssl_ctx_t *proxy;
ssl_enabled_t proxy_ssl_check_peer_expire;
ssl_enabled_t proxy_ssl_check_peer_cn;
+ ssl_enabled_t proxy_ssl_check_peer_name;
#ifndef OPENSSL_NO_TLSEXT
ssl_enabled_t strict_sni_vhost_check;
#endif
@@ -782,6 +783,7 @@ const char *ssl_cmd_SSLSessionTicketKeyFile(cmd_parms *cmd, void *dcfg, const ch
#endif
const char *ssl_cmd_SSLProxyCheckPeerExpire(cmd_parms *cmd, void *dcfg, int flag);
const char *ssl_cmd_SSLProxyCheckPeerCN(cmd_parms *cmd, void *dcfg, int flag);
+const char *ssl_cmd_SSLProxyCheckPeerName(cmd_parms *cmd, void *dcfg, int flag);
const char *ssl_cmd_SSLOCSPOverrideResponder(cmd_parms *cmd, void *dcfg, int flag);
const char *ssl_cmd_SSLOCSPDefaultResponder(cmd_parms *cmd, void *dcfg, const char *arg);
diff --git a/modules/ssl/ssl_util_ocsp.c b/modules/ssl/ssl_util_ocsp.c
index e5c5e58d..757df05f 100644
--- a/modules/ssl/ssl_util_ocsp.c
+++ b/modules/ssl/ssl_util_ocsp.c
@@ -236,7 +236,7 @@ static OCSP_RESPONSE *read_response(apr_socket_t *sd, BIO *bio, conn_rec *c,
apr_bucket *e = APR_BRIGADE_FIRST(bb);
rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
- if (rv == APR_EOF || (rv == APR_SUCCESS && len == 0)) {
+ if (rv == APR_EOF) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(01984)
"OCSP response: got EOF");
break;
@@ -246,6 +246,12 @@ static OCSP_RESPONSE *read_response(apr_socket_t *sd, BIO *bio, conn_rec *c,
"error reading response from OCSP server");
return NULL;
}
+ if (len == 0) {
+ /* Ignore zero-length buckets (possible side-effect of
+ * line splitting). */
+ apr_bucket_delete(e);
+ continue;
+ }
count += len;
if (count > MAX_CONTENT) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c, APLOGNO(01986)
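
The point of the change is that a successful zero-length read is no longer conflated with end-of-stream: only APR_EOF terminates the response, while empty buckets are deleted and the loop continues. A simplified consumer loop illustrating the same distinction (hypothetical function name, using only the APR bucket API):

    #include "apr_buckets.h"

    /* Drain a brigade, counting payload bytes.  APR_EOF ends the
     * stream; a zero-length bucket is merely skipped. */
    static apr_status_t drain_brigade(apr_bucket_brigade *bb,
                                      apr_size_t *total)
    {
        *total = 0;
        while (!APR_BRIGADE_EMPTY(bb)) {
            apr_bucket *e = APR_BRIGADE_FIRST(bb);
            const char *data;
            apr_size_t len;
            apr_status_t rv = apr_bucket_read(e, &data, &len,
                                              APR_BLOCK_READ);

            if (rv == APR_EOF)
                return APR_SUCCESS;    /* end of response */
            if (rv != APR_SUCCESS)
                return rv;             /* genuine read error */
            if (len == 0) {            /* empty bucket: skip, don't stop */
                apr_bucket_delete(e);
                continue;
            }
            *total += len;             /* ...consume data here... */
            apr_bucket_delete(e);
        }
        return APR_SUCCESS;
    }
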
diff --git a/modules/ssl/ssl_util_ssl.c b/modules/ssl/ssl_util_ssl.c
index 48b561dc..a8c36adb 100644
--- a/modules/ssl/ssl_util_ssl.c
+++ b/modules/ssl/ssl_util_ssl.c
@@ -338,6 +338,82 @@ BOOL SSL_X509_getIDs(apr_pool_t *p, X509 *x509, apr_array_header_t **ids)
return apr_is_empty_array(*ids) ? FALSE : TRUE;
}
+/*
+ * Check if a certificate matches for a particular name, by iterating over its
+ * DNS-IDs and CN-IDs (RFC 6125), optionally with basic wildcard matching.
+ * If server_rec is non-NULL, some (debug/trace) logging is enabled.
+ */
+BOOL SSL_X509_match_name(apr_pool_t *p, X509 *x509, const char *name,
+ BOOL allow_wildcard, server_rec *s)
+{
+ BOOL matched = FALSE;
+ apr_array_header_t *ids;
+
+ /*
+ * At some point in the future, this might be replaced with X509_check_host()
+ * (available in OpenSSL 1.0.2 and later), but two points should be noted:
+ * 1) wildcard matching in X509_check_host() might yield different
+ * results (by default, it supports a broader set of patterns, e.g.
+ * wildcards in non-initial positions);
+ * 2) we lose the option of logging each DNS- and CN-ID (until a match
+ * is found).
+ */
+
+ if (SSL_X509_getIDs(p, x509, &ids)) {
+ const char *cp;
+ int i;
+ char **id = (char **)ids->elts;
+ BOOL is_wildcard;
+
+ for (i = 0; i < ids->nelts; i++) {
+ if (!id[i])
+ continue;
+
+ /*
+ * Determine if it is a wildcard ID - we're restrictive
+ * in the sense that we require the wildcard character to be
+ * THE left-most label (i.e., the ID must start with "*.")
+ */
+ is_wildcard = (*id[i] == '*' && *(id[i]+1) == '.') ? TRUE : FALSE;
+
+ /*
+ * If the ID includes a wildcard character (and the caller is
+ * allowing wildcards), check if it matches for the left-most
+ * DNS label - i.e., the wildcard character is not allowed
+ * to match a dot. Otherwise, try a simple string compare.
+ */
+ if ((allow_wildcard == TRUE && is_wildcard == TRUE &&
+ (cp = ap_strchr_c(name, '.')) && !strcasecmp(id[i]+1, cp)) ||
+ !strcasecmp(id[i], name)) {
+ matched = TRUE;
+ }
+
+ if (s) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "[%s] SSL_X509_match_name: expecting name '%s', "
+ "%smatched by ID '%s'",
+ (mySrvConfig(s))->vhost_id, name,
+ matched == TRUE ? "" : "NOT ", id[i]);
+ }
+
+ if (matched == TRUE) {
+ break;
+ }
+ }
+
+ }
+
+ if (s) {
+ ssl_log_xerror(SSLLOG_MARK, APLOG_DEBUG, 0, p, s, x509,
+ APLOGNO(02412) "[%s] Cert %s for name '%s'",
+ (mySrvConfig(s))->vhost_id,
+ matched == TRUE ? "matches" : "does not match",
+ name);
+ }
+
+ return matched;
+}
+
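
A usage sketch for the new helper (caller-side; the certificate and pool are assumed to be in scope). Per the matching rules above, with allow_wildcard set to TRUE an ID of "*.example.org" covers "www.example.org" but neither "example.org" nor "a.b.example.org", since the wildcard may only stand for the single left-most label; passing NULL for the server_rec suppresses the trace/debug logging.

    /* Hypothetical caller: does this certificate cover www.example.org? */
    static int covers_www_example_org(apr_pool_t *p, X509 *cert)
    {
        return SSL_X509_match_name(p, cert, "www.example.org",
                                   TRUE, NULL) == TRUE;
    }
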
/* _________________________________________________________________
**
** Low-Level CA Certificate Loading
@@ -479,14 +555,15 @@ int SSL_CTX_use_certificate_chain(
char *SSL_SESSION_id2sz(unsigned char *id, int idlen,
char *str, int strsize)
{
- char *cp;
- int n;
+ if (idlen > SSL_MAX_SSL_SESSION_ID_LENGTH)
+ idlen = SSL_MAX_SSL_SESSION_ID_LENGTH;
+
+ /* We must not process more than would fit in the destination
+ * buffer, including the terminating NUL. */
+ if (idlen > (strsize-1) / 2)
+ idlen = (strsize-1) / 2;
+
+ ap_bin2hex(id, idlen, str);
- cp = str;
- for (n = 0; n < idlen && n < SSL_MAX_SSL_SESSION_ID_LENGTH; n++) {
- apr_snprintf(cp, strsize - (cp-str), "%02X", id[n]);
- cp += 2;
- }
- *cp = NUL;
return str;
}
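
As rewritten, the function first clamps idlen to SSL_MAX_SSL_SESSION_ID_LENGTH and then to (strsize - 1) / 2, so ap_bin2hex() can never write past the destination buffer. A usage sketch with hypothetical values: a 32-byte session id and a 65-byte buffer yield 64 hex digits plus the terminating NUL.

    /* Hypothetical caller: hex-encode a 32-byte session id.
     * 32 <= SSL_MAX_SSL_SESSION_ID_LENGTH and 32 <= (65 - 1) / 2,
     * so nothing is truncated. */
    static void log_session_id(unsigned char *id)
    {
        char buf[65];
        SSL_SESSION_id2sz(id, 32, buf, sizeof(buf));
        /* buf now holds 64 hex characters, NUL-terminated */
    }
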
diff --git a/modules/ssl/ssl_util_ssl.h b/modules/ssl/ssl_util_ssl.h
index 1688bb67..4b882db2 100644
--- a/modules/ssl/ssl_util_ssl.h
+++ b/modules/ssl/ssl_util_ssl.h
@@ -68,6 +68,7 @@ BOOL SSL_X509_getBC(X509 *, int *, int *);
char *SSL_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne);
char *SSL_X509_NAME_to_string(apr_pool_t *, X509_NAME *, int);
BOOL SSL_X509_getIDs(apr_pool_t *, X509 *, apr_array_header_t **);
+BOOL SSL_X509_match_name(apr_pool_t *, X509 *, const char *, BOOL, server_rec *);
BOOL SSL_X509_INFO_load_file(apr_pool_t *, STACK_OF(X509_INFO) *, const char *);
BOOL SSL_X509_INFO_load_path(apr_pool_t *, STACK_OF(X509_INFO) *, const char *);
int SSL_CTX_use_certificate_chain(SSL_CTX *, char *, int, pem_password_cb *);
diff --git a/modules/ssl/ssl_util_stapling.c b/modules/ssl/ssl_util_stapling.c
index 89be7f53..0387cf92 100644
--- a/modules/ssl/ssl_util_stapling.c
+++ b/modules/ssl/ssl_util_stapling.c
@@ -449,7 +449,7 @@ static BOOL stapling_renew_response(server_rec *s, modssl_ctx_t *mctx, SSL *ssl,
stapling_check_response(s, mctx, cinf, *prsp, &ok);
if (ok == FALSE) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(01943)
- "stapling_renew_response: error in retreived response!");
+ "stapling_renew_response: error in retrieved response!");
}
}
else {