Diffstat (limited to 'modules')
-rw-r--r--  modules/aaa/mod_access_compat.mak  4
-rw-r--r--  modules/aaa/mod_allowmethods.mak  4
-rw-r--r--  modules/aaa/mod_auth_basic.mak  4
-rw-r--r--  modules/aaa/mod_auth_digest.mak  4
-rw-r--r--  modules/aaa/mod_auth_form.mak  4
-rw-r--r--  modules/aaa/mod_authn_anon.mak  4
-rw-r--r--  modules/aaa/mod_authn_core.mak  4
-rw-r--r--  modules/aaa/mod_authn_dbd.mak  4
-rw-r--r--  modules/aaa/mod_authn_dbm.mak  4
-rw-r--r--  modules/aaa/mod_authn_file.mak  4
-rw-r--r--  modules/aaa/mod_authn_socache.mak  4
-rw-r--r--  modules/aaa/mod_authnz_fcgi.mak  4
-rw-r--r--  modules/aaa/mod_authnz_ldap.mak  4
-rw-r--r--  modules/aaa/mod_authz_core.mak  4
-rw-r--r--  modules/aaa/mod_authz_dbd.mak  4
-rw-r--r--  modules/aaa/mod_authz_dbm.mak  4
-rw-r--r--  modules/aaa/mod_authz_groupfile.mak  4
-rw-r--r--  modules/aaa/mod_authz_host.c  1
-rw-r--r--  modules/aaa/mod_authz_host.mak  4
-rw-r--r--  modules/aaa/mod_authz_owner.mak  4
-rw-r--r--  modules/aaa/mod_authz_user.mak  4
-rw-r--r--  modules/arch/win32/mod_isapi.mak  4
-rw-r--r--  modules/cache/cache_util.c  35
-rw-r--r--  modules/cache/mod_cache.mak  4
-rw-r--r--  modules/cache/mod_cache_disk.mak  4
-rw-r--r--  modules/cache/mod_cache_socache.mak  4
-rw-r--r--  modules/cache/mod_file_cache.mak  4
-rw-r--r--  modules/cache/mod_socache_dbm.mak  4
-rw-r--r--  modules/cache/mod_socache_dc.mak  4
-rw-r--r--  modules/cache/mod_socache_memcache.mak  4
-rw-r--r--  modules/cache/mod_socache_shmcb.mak  4
-rw-r--r--  modules/cluster/mod_heartbeat.mak  4
-rw-r--r--  modules/cluster/mod_heartmonitor.mak  4
-rw-r--r--  modules/core/config.m4  2
-rw-r--r--  modules/core/mod_macro.mak  4
-rw-r--r--  modules/core/mod_watchdog.mak  4
-rw-r--r--  modules/database/mod_dbd.mak  4
-rw-r--r--  modules/dav/fs/mod_dav_fs.mak  4
-rw-r--r--  modules/dav/lock/mod_dav_lock.mak  4
-rw-r--r--  modules/dav/main/mod_dav.c  6
-rw-r--r--  modules/dav/main/mod_dav.h  1
-rw-r--r--  modules/dav/main/mod_dav.mak  4
-rw-r--r--  modules/debugging/mod_bucketeer.mak  4
-rw-r--r--  modules/debugging/mod_dumpio.mak  4
-rw-r--r--  modules/echo/mod_echo.mak  4
-rw-r--r--  modules/examples/mod_case_filter.mak  4
-rw-r--r--  modules/examples/mod_case_filter_in.mak  4
-rw-r--r--  modules/examples/mod_example_hooks.mak  4
-rw-r--r--  modules/examples/mod_example_ipc.mak  4
-rw-r--r--  modules/filters/mod_buffer.mak  4
-rw-r--r--  modules/filters/mod_charset_lite.mak  4
-rw-r--r--  modules/filters/mod_data.mak  4
-rw-r--r--  modules/filters/mod_deflate.mak  4
-rw-r--r--  modules/filters/mod_ext_filter.mak  4
-rw-r--r--  modules/filters/mod_filter.mak  4
-rw-r--r--  modules/filters/mod_include.c  10
-rw-r--r--  modules/filters/mod_include.mak  4
-rw-r--r--  modules/filters/mod_proxy_html.mak  4
-rw-r--r--  modules/filters/mod_ratelimit.mak  4
-rw-r--r--  modules/filters/mod_reflector.mak  4
-rw-r--r--  modules/filters/mod_reqtimeout.mak  4
-rw-r--r--  modules/filters/mod_request.mak  4
-rw-r--r--  modules/filters/mod_sed.mak  4
-rw-r--r--  modules/filters/mod_substitute.mak  4
-rw-r--r--  modules/filters/mod_xml2enc.mak  4
-rw-r--r--  modules/filters/sed0.c  4
-rw-r--r--  modules/filters/sed1.c  1
-rw-r--r--  modules/generators/mod_asis.mak  4
-rw-r--r--  modules/generators/mod_autoindex.mak  4
-rw-r--r--  modules/generators/mod_cgi.mak  4
-rw-r--r--  modules/generators/mod_info.mak  4
-rw-r--r--  modules/generators/mod_status.mak  4
-rw-r--r--  modules/http/http_core.c  4
-rw-r--r--  modules/http/http_protocol.c  28
-rw-r--r--  modules/http/mod_mime.mak  4
-rw-r--r--  modules/http2/NWGNUmakefile  2
-rw-r--r--  modules/http2/NWGNUmod_http2  15
-rw-r--r--  modules/http2/NWGNUproxyht2  287
-rw-r--r--  modules/http2/config2.m4  34
-rw-r--r--  modules/http2/h2.h  3
-rw-r--r--  modules/http2/h2_bucket_beam.c  1015
-rw-r--r--  modules/http2/h2_bucket_beam.h  363
-rw-r--r--  modules/http2/h2_bucket_eoc.c  1
-rw-r--r--  modules/http2/h2_bucket_eos.c  14
-rw-r--r--  modules/http2/h2_conn.c  1
-rw-r--r--  modules/http2/h2_conn_io.c  333
-rw-r--r--  modules/http2/h2_conn_io.h  27
-rw-r--r--  modules/http2/h2_ctx.c  1
-rw-r--r--  modules/http2/h2_filter.c  72
-rw-r--r--  modules/http2/h2_filter.h  28
-rw-r--r--  modules/http2/h2_from_h1.c  28
-rw-r--r--  modules/http2/h2_from_h1.h  3
-rw-r--r--  modules/http2/h2_h2.c  9
-rw-r--r--  modules/http2/h2_int_queue.c  187
-rw-r--r--  modules/http2/h2_int_queue.h  108
-rw-r--r--  modules/http2/h2_io.c  453
-rw-r--r--  modules/http2/h2_io.h  175
-rw-r--r--  modules/http2/h2_io_set.c  159
-rw-r--r--  modules/http2/h2_io_set.h  53
-rw-r--r--  modules/http2/h2_mplx.c  1432
-rw-r--r--  modules/http2/h2_mplx.h  121
-rw-r--r--  modules/http2/h2_ngn_shed.c  32
-rw-r--r--  modules/http2/h2_proxy_session.c  1368
-rw-r--r--  modules/http2/h2_proxy_session.h  111
-rw-r--r--  modules/http2/h2_proxy_util.c  705
-rw-r--r--  modules/http2/h2_proxy_util.h  181
-rw-r--r--  modules/http2/h2_push.c  6
-rw-r--r--  modules/http2/h2_push.h  2
-rw-r--r--  modules/http2/h2_request.c  170
-rw-r--r--  modules/http2/h2_request.h  17
-rw-r--r--  modules/http2/h2_response.c  3
-rw-r--r--  modules/http2/h2_session.c  899
-rw-r--r--  modules/http2/h2_session.h  21
-rw-r--r--  modules/http2/h2_stream.c  507
-rw-r--r--  modules/http2/h2_stream.h  69
-rw-r--r--  modules/http2/h2_task.c  568
-rw-r--r--  modules/http2/h2_task.h  63
-rw-r--r--  modules/http2/h2_task_input.c  228
-rw-r--r--  modules/http2/h2_task_input.h  46
-rw-r--r--  modules/http2/h2_task_output.c  176
-rw-r--r--  modules/http2/h2_task_output.h  50
-rw-r--r--  modules/http2/h2_task_queue.c  177
-rw-r--r--  modules/http2/h2_task_queue.h  97
-rw-r--r--  modules/http2/h2_util.c  865
-rw-r--r--  modules/http2/h2_util.h  209
-rw-r--r--  modules/http2/h2_version.h  4
-rw-r--r--  modules/http2/h2_worker.c  47
-rw-r--r--  modules/http2/h2_worker.h  12
-rw-r--r--  modules/http2/h2_workers.c  6
-rw-r--r--  modules/http2/mod_http2.c  23
-rw-r--r--  modules/http2/mod_http2.dep  33
-rw-r--r--  modules/http2/mod_http2.dsp  24
-rw-r--r--  modules/http2/mod_http2.mak  58
-rw-r--r--  modules/http2/mod_proxy_http2.c  650
-rw-r--r--  modules/http2/mod_proxy_http2.dep  208
-rw-r--r--  modules/http2/mod_proxy_http2.dsp  119
-rw-r--r--  modules/http2/mod_proxy_http2.h (renamed from modules/http2/mod_h2.h)  5
-rw-r--r--  modules/http2/mod_proxy_http2.mak  427
-rw-r--r--  modules/ldap/mod_ldap.mak  4
-rw-r--r--  modules/loggers/mod_log_config.mak  4
-rw-r--r--  modules/loggers/mod_log_debug.mak  4
-rw-r--r--  modules/loggers/mod_log_forensic.mak  4
-rw-r--r--  modules/loggers/mod_logio.mak  4
-rw-r--r--  modules/lua/mod_lua.mak  4
-rw-r--r--  modules/mappers/mod_actions.mak  4
-rw-r--r--  modules/mappers/mod_alias.mak  4
-rw-r--r--  modules/mappers/mod_dir.mak  4
-rw-r--r--  modules/mappers/mod_imagemap.mak  4
-rw-r--r--  modules/mappers/mod_negotiation.mak  4
-rw-r--r--  modules/mappers/mod_rewrite.c  8
-rw-r--r--  modules/mappers/mod_rewrite.mak  4
-rw-r--r--  modules/mappers/mod_speling.mak  4
-rw-r--r--  modules/mappers/mod_userdir.c  8
-rw-r--r--  modules/mappers/mod_userdir.mak  4
-rw-r--r--  modules/mappers/mod_vhost_alias.mak  4
-rw-r--r--  modules/metadata/mod_cern_meta.mak  4
-rw-r--r--  modules/metadata/mod_env.mak  4
-rw-r--r--  modules/metadata/mod_expires.mak  4
-rw-r--r--  modules/metadata/mod_headers.mak  4
-rw-r--r--  modules/metadata/mod_ident.mak  4
-rw-r--r--  modules/metadata/mod_mime_magic.mak  4
-rw-r--r--  modules/metadata/mod_remoteip.mak  4
-rw-r--r--  modules/metadata/mod_setenvif.mak  4
-rw-r--r--  modules/metadata/mod_unique_id.mak  4
-rw-r--r--  modules/metadata/mod_usertrack.mak  4
-rw-r--r--  modules/metadata/mod_version.mak  4
-rw-r--r--  modules/proxy/NWGNUmakefile  1
-rw-r--r--  modules/proxy/balancers/config2.m4  8
-rw-r--r--  modules/proxy/balancers/mod_lbmethod_bybusyness.mak  4
-rw-r--r--  modules/proxy/balancers/mod_lbmethod_byrequests.mak  4
-rw-r--r--  modules/proxy/balancers/mod_lbmethod_bytraffic.mak  4
-rw-r--r--  modules/proxy/balancers/mod_lbmethod_heartbeat.mak  4
-rw-r--r--  modules/proxy/config.m4  46
-rw-r--r--  modules/proxy/mod_proxy.c  60
-rw-r--r--  modules/proxy/mod_proxy.h  79
-rw-r--r--  modules/proxy/mod_proxy.mak  4
-rw-r--r--  modules/proxy/mod_proxy_ajp.mak  4
-rw-r--r--  modules/proxy/mod_proxy_balancer.c  196
-rw-r--r--  modules/proxy/mod_proxy_balancer.mak  4
-rw-r--r--  modules/proxy/mod_proxy_connect.mak  4
-rw-r--r--  modules/proxy/mod_proxy_express.mak  4
-rw-r--r--  modules/proxy/mod_proxy_fcgi.c  25
-rw-r--r--  modules/proxy/mod_proxy_fcgi.mak  4
-rw-r--r--  modules/proxy/mod_proxy_ftp.mak  4
-rw-r--r--  modules/proxy/mod_proxy_hcheck.c  1175
-rw-r--r--  modules/proxy/mod_proxy_http.mak  4
-rw-r--r--  modules/proxy/mod_proxy_scgi.c  5
-rw-r--r--  modules/proxy/mod_proxy_scgi.mak  4
-rw-r--r--  modules/proxy/mod_proxy_wstunnel.mak  4
-rw-r--r--  modules/proxy/proxy_util.c  78
-rw-r--r--  modules/session/mod_session.mak  4
-rw-r--r--  modules/session/mod_session_cookie.mak  4
-rw-r--r--  modules/session/mod_session_crypto.mak  4
-rw-r--r--  modules/session/mod_session_dbd.mak  4
-rw-r--r--  modules/slotmem/mod_slotmem_plain.mak  4
-rw-r--r--  modules/slotmem/mod_slotmem_shm.mak  4
-rw-r--r--  modules/ssl/mod_ssl.c  4
-rw-r--r--  modules/ssl/mod_ssl.mak  4
-rw-r--r--  modules/ssl/ssl_engine_config.c  39
-rw-r--r--  modules/ssl/ssl_engine_init.c  12
-rw-r--r--  modules/ssl/ssl_engine_io.c  42
-rw-r--r--  modules/ssl/ssl_engine_kernel.c  22
-rw-r--r--  modules/ssl/ssl_private.h  14
203 files changed, 10536 insertions, 4936 deletions
diff --git a/modules/aaa/mod_access_compat.mak b/modules/aaa/mod_access_compat.mak
index d9bfd3c4..4d807046 100644
--- a/modules/aaa/mod_access_compat.mak
+++ b/modules/aaa/mod_access_compat.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_access_compat.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_access_compat.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_access_compat.so" /d LONG_NAME="access_compat_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_access_compat.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_access_compat.so" /d LONG_NAME="access_compat_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_access_compat - Win32 Debug"
"$(INTDIR)\mod_access_compat.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_access_compat.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_access_compat.so" /d LONG_NAME="access_compat_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_access_compat.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_access_compat.so" /d LONG_NAME="access_compat_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_allowmethods.mak b/modules/aaa/mod_allowmethods.mak
index 20eee0aa..10495150 100644
--- a/modules/aaa/mod_allowmethods.mak
+++ b/modules/aaa/mod_allowmethods.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_allowmethods.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_allowmethods.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_allowmethods.so" /d LONG_NAME="allowmethods_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_allowmethods.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_allowmethods.so" /d LONG_NAME="allowmethods_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_allowmethods - Win32 Debug"
"$(INTDIR)\mod_allowmethods.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_allowmethods.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_allowmethods.so" /d LONG_NAME="allowmethods_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_allowmethods.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_allowmethods.so" /d LONG_NAME="allowmethods_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_auth_basic.mak b/modules/aaa/mod_auth_basic.mak
index ef19c2bf..ddd5198d 100644
--- a/modules/aaa/mod_auth_basic.mak
+++ b/modules/aaa/mod_auth_basic.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_auth_basic.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_basic.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_auth_basic.so" /d LONG_NAME="auth_basic_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_basic.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_auth_basic.so" /d LONG_NAME="auth_basic_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_auth_basic - Win32 Debug"
"$(INTDIR)\mod_auth_basic.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_basic.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_auth_basic.so" /d LONG_NAME="auth_basic_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_basic.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_auth_basic.so" /d LONG_NAME="auth_basic_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_auth_digest.mak b/modules/aaa/mod_auth_digest.mak
index 0cda52ea..4b543756 100644
--- a/modules/aaa/mod_auth_digest.mak
+++ b/modules/aaa/mod_auth_digest.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_auth_digest.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_digest.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_auth_digest.so" /d LONG_NAME="auth_digest_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_digest.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_auth_digest.so" /d LONG_NAME="auth_digest_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_auth_digest - Win32 Debug"
"$(INTDIR)\mod_auth_digest.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_digest.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_auth_digest.so" /d LONG_NAME="auth_digest_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_digest.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_auth_digest.so" /d LONG_NAME="auth_digest_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_auth_form.mak b/modules/aaa/mod_auth_form.mak
index 6c1e804c..3c6c67a2 100644
--- a/modules/aaa/mod_auth_form.mak
+++ b/modules/aaa/mod_auth_form.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_auth_form.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_form.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_auth_form.so" /d LONG_NAME="auth_form_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_form.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_auth_form.so" /d LONG_NAME="auth_form_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_auth_form - Win32 Debug"
"$(INTDIR)\mod_auth_form.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_form.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_auth_form.so" /d LONG_NAME="auth_form_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_auth_form.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_auth_form.so" /d LONG_NAME="auth_form_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authn_anon.mak b/modules/aaa/mod_authn_anon.mak
index 965d8050..6ff99728 100644
--- a/modules/aaa/mod_authn_anon.mak
+++ b/modules/aaa/mod_authn_anon.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authn_anon.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_anon.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_anon.so" /d LONG_NAME="authn_anon_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_anon.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_anon.so" /d LONG_NAME="authn_anon_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authn_anon - Win32 Debug"
"$(INTDIR)\mod_authn_anon.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_anon.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_anon.so" /d LONG_NAME="authn_anon_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_anon.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_anon.so" /d LONG_NAME="authn_anon_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authn_core.mak b/modules/aaa/mod_authn_core.mak
index 6a2baec9..ec88b2dd 100644
--- a/modules/aaa/mod_authn_core.mak
+++ b/modules/aaa/mod_authn_core.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authn_core.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_core.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_core.so" /d LONG_NAME="authn_core_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_core.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_core.so" /d LONG_NAME="authn_core_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authn_core - Win32 Debug"
"$(INTDIR)\mod_authn_core.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_core.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_core.so" /d LONG_NAME="authn_core_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_core.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_core.so" /d LONG_NAME="authn_core_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authn_dbd.mak b/modules/aaa/mod_authn_dbd.mak
index 81de1fe3..9c983dcd 100644
--- a/modules/aaa/mod_authn_dbd.mak
+++ b/modules/aaa/mod_authn_dbd.mak
@@ -387,14 +387,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authn_dbd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_dbd.so" /d LONG_NAME="authn_dbd_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_dbd.so" /d LONG_NAME="authn_dbd_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authn_dbd - Win32 Debug"
"$(INTDIR)\mod_authn_dbd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_dbd.so" /d LONG_NAME="authn_dbd_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_dbd.so" /d LONG_NAME="authn_dbd_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authn_dbm.mak b/modules/aaa/mod_authn_dbm.mak
index ffc4127f..e15242a7 100644
--- a/modules/aaa/mod_authn_dbm.mak
+++ b/modules/aaa/mod_authn_dbm.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authn_dbm.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_dbm.so" /d LONG_NAME="authn_dbm_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_dbm.so" /d LONG_NAME="authn_dbm_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authn_dbm - Win32 Debug"
"$(INTDIR)\mod_authn_dbm.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_dbm.so" /d LONG_NAME="authn_dbm_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_dbm.so" /d LONG_NAME="authn_dbm_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authn_file.mak b/modules/aaa/mod_authn_file.mak
index b36cd3ee..3d98ce20 100644
--- a/modules/aaa/mod_authn_file.mak
+++ b/modules/aaa/mod_authn_file.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authn_file.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_file.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_file.so" /d LONG_NAME="authn_file_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_file.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_file.so" /d LONG_NAME="authn_file_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authn_file - Win32 Debug"
"$(INTDIR)\mod_authn_file.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_file.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_file.so" /d LONG_NAME="authn_file_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_file.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_file.so" /d LONG_NAME="authn_file_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authn_socache.mak b/modules/aaa/mod_authn_socache.mak
index 7ba5e796..7d434731 100644
--- a/modules/aaa/mod_authn_socache.mak
+++ b/modules/aaa/mod_authn_socache.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authn_socache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_socache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_socache.so" /d LONG_NAME="authn_socache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_socache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authn_socache.so" /d LONG_NAME="authn_socache_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authn_socache - Win32 Debug"
"$(INTDIR)\mod_authn_socache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_socache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_socache.so" /d LONG_NAME="authn_socache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authn_socache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authn_socache.so" /d LONG_NAME="authn_socache_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authnz_fcgi.mak b/modules/aaa/mod_authnz_fcgi.mak
index d933d14f..772cae2f 100644
--- a/modules/aaa/mod_authnz_fcgi.mak
+++ b/modules/aaa/mod_authnz_fcgi.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authnz_fcgi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authnz_fcgi.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authnz_fcgi.so" /d LONG_NAME="authnz_fcgi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authnz_fcgi.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authnz_fcgi.so" /d LONG_NAME="authnz_fcgi_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authnz_fcgi - Win32 Debug"
"$(INTDIR)\mod_authnz_fcgi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authnz_fcgi.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authnz_fcgi.so" /d LONG_NAME="authnz_fcgi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authnz_fcgi.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authnz_fcgi.so" /d LONG_NAME="authnz_fcgi_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authnz_ldap.mak b/modules/aaa/mod_authnz_ldap.mak
index bdecc42c..96cc0448 100644
--- a/modules/aaa/mod_authnz_ldap.mak
+++ b/modules/aaa/mod_authnz_ldap.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authnz_ldap.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authnz_ldap.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authnz_ldap.so" /d LONG_NAME="authnz_ldap_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authnz_ldap.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authnz_ldap.so" /d LONG_NAME="authnz_ldap_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authnz_ldap - Win32 Debug"
"$(INTDIR)\mod_authnz_ldap.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authnz_ldap.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authnz_ldap.so" /d LONG_NAME="authnz_ldap_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authnz_ldap.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authnz_ldap.so" /d LONG_NAME="authnz_ldap_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authz_core.mak b/modules/aaa/mod_authz_core.mak
index fec1642a..7351f674 100644
--- a/modules/aaa/mod_authz_core.mak
+++ b/modules/aaa/mod_authz_core.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authz_core.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_core.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_core.so" /d LONG_NAME="authz_core_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_core.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_core.so" /d LONG_NAME="authz_core_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authz_core - Win32 Debug"
"$(INTDIR)\mod_authz_core.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_core.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_core.so" /d LONG_NAME="authz_core_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_core.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_core.so" /d LONG_NAME="authz_core_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authz_dbd.mak b/modules/aaa/mod_authz_dbd.mak
index b9a8c24b..da7a4533 100644
--- a/modules/aaa/mod_authz_dbd.mak
+++ b/modules/aaa/mod_authz_dbd.mak
@@ -387,14 +387,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authz_dbd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_dbd.so" /d LONG_NAME="authz_dbd_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_dbd.so" /d LONG_NAME="authz_dbd_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authz_dbd - Win32 Debug"
"$(INTDIR)\mod_authz_dbd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_dbd.so" /d LONG_NAME="authz_dbd_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_dbd.so" /d LONG_NAME="authz_dbd_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authz_dbm.mak b/modules/aaa/mod_authz_dbm.mak
index 9296edab..4be17d5b 100644
--- a/modules/aaa/mod_authz_dbm.mak
+++ b/modules/aaa/mod_authz_dbm.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authz_dbm.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_dbm.so" /d LONG_NAME="authz_dbm_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_dbm.so" /d LONG_NAME="authz_dbm_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authz_dbm - Win32 Debug"
"$(INTDIR)\mod_authz_dbm.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_dbm.so" /d LONG_NAME="authz_dbm_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_dbm.so" /d LONG_NAME="authz_dbm_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authz_groupfile.mak b/modules/aaa/mod_authz_groupfile.mak
index caef2166..37d729cc 100644
--- a/modules/aaa/mod_authz_groupfile.mak
+++ b/modules/aaa/mod_authz_groupfile.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authz_groupfile.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_groupfile.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_groupfile.so" /d LONG_NAME="authz_groupfile_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_groupfile.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_groupfile.so" /d LONG_NAME="authz_groupfile_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authz_groupfile - Win32 Debug"
"$(INTDIR)\mod_authz_groupfile.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_groupfile.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_groupfile.so" /d LONG_NAME="authz_groupfile_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_groupfile.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_groupfile.so" /d LONG_NAME="authz_groupfile_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authz_host.c b/modules/aaa/mod_authz_host.c
index dff1d322..76f95b84 100644
--- a/modules/aaa/mod_authz_host.c
+++ b/modules/aaa/mod_authz_host.c
@@ -217,6 +217,7 @@ forward_dns_check_authorization(request_rec *r,
require = ap_expr_str_exec(r, expr, &err);
if (err) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03354)
+ "authz_host authorize: require forward-dns: "
"Can't evaluate require expression: %s", err);
return AUTHZ_DENIED;
}
diff --git a/modules/aaa/mod_authz_host.mak b/modules/aaa/mod_authz_host.mak
index 72ee8a30..1ad9e852 100644
--- a/modules/aaa/mod_authz_host.mak
+++ b/modules/aaa/mod_authz_host.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authz_host.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_host.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_host.so" /d LONG_NAME="authz_host_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_host.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_host.so" /d LONG_NAME="authz_host_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authz_host - Win32 Debug"
"$(INTDIR)\mod_authz_host.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_host.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_host.so" /d LONG_NAME="authz_host_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_host.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_host.so" /d LONG_NAME="authz_host_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authz_owner.mak b/modules/aaa/mod_authz_owner.mak
index 9fa49e74..850a1f76 100644
--- a/modules/aaa/mod_authz_owner.mak
+++ b/modules/aaa/mod_authz_owner.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authz_owner.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_owner.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_owner.so" /d LONG_NAME="authz_owner_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_owner.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_owner.so" /d LONG_NAME="authz_owner_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authz_owner - Win32 Debug"
"$(INTDIR)\mod_authz_owner.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_owner.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_owner.so" /d LONG_NAME="authz_owner_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_owner.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_owner.so" /d LONG_NAME="authz_owner_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/aaa/mod_authz_user.mak b/modules/aaa/mod_authz_user.mak
index 31132bd7..0989f6e5 100644
--- a/modules/aaa/mod_authz_user.mak
+++ b/modules/aaa/mod_authz_user.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_authz_user.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_user.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_user.so" /d LONG_NAME="authz_user_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_user.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_authz_user.so" /d LONG_NAME="authz_user_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_authz_user - Win32 Debug"
"$(INTDIR)\mod_authz_user.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_user.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_user.so" /d LONG_NAME="authz_user_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_authz_user.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_authz_user.so" /d LONG_NAME="authz_user_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/arch/win32/mod_isapi.mak b/modules/arch/win32/mod_isapi.mak
index df1a0cec..cee90474 100644
--- a/modules/arch/win32/mod_isapi.mak
+++ b/modules/arch/win32/mod_isapi.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\..\build\win32\httpd.rc
"$(INTDIR)\mod_isapi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_isapi.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_isapi.so" /d LONG_NAME="isapi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_isapi.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_isapi.so" /d LONG_NAME="isapi_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_isapi - Win32 Debug"
"$(INTDIR)\mod_isapi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_isapi.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_isapi.so" /d LONG_NAME="isapi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_isapi.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_isapi.so" /d LONG_NAME="isapi_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cache/cache_util.c b/modules/cache/cache_util.c
index 070a5f0d..1d65d3f7 100644
--- a/modules/cache/cache_util.c
+++ b/modules/cache/cache_util.c
@@ -576,7 +576,12 @@ int cache_check_freshness(cache_handle_t *h, cache_request_rec *cache,
}
if ((agestr = apr_table_get(h->resp_hdrs, "Age"))) {
- age_c = apr_atoi64(agestr);
+ char *endp;
+ apr_off_t offt;
+ if (!apr_strtoff(&offt, agestr, &endp, 10)
+ && endp > agestr && !*endp) {
+ age_c = offt;
+ }
}
/* calculate age of object */
@@ -990,6 +995,8 @@ int ap_cache_control(request_rec *r, cache_control_t *cc,
}
if (cc_header) {
+ char *endp;
+ apr_off_t offt;
char *header = apr_pstrdup(r->pool, cc_header);
const char *token = cache_strqtok(header, CACHE_SEPARATOR, &last);
while (token) {
@@ -1033,27 +1040,33 @@ int ap_cache_control(request_rec *r, cache_control_t *cc,
}
/* ...then try slowest cases */
else if (!strncasecmp(token, "max-age", 7)) {
- if (token[7] == '=') {
+ if (token[7] == '='
+ && !apr_strtoff(&offt, token + 8, &endp, 10)
+ && endp > token + 8 && !*endp) {
cc->max_age = 1;
- cc->max_age_value = apr_atoi64(token + 8);
+ cc->max_age_value = offt;
}
break;
}
else if (!strncasecmp(token, "max-stale", 9)) {
- if (token[9] == '=') {
+ if (token[9] == '='
+ && !apr_strtoff(&offt, token + 10, &endp, 10)
+ && endp > token + 10 && !*endp) {
cc->max_stale = 1;
- cc->max_stale_value = apr_atoi64(token + 10);
+ cc->max_stale_value = offt;
}
- else if (!token[10]) {
+ else if (!token[9]) {
cc->max_stale = 1;
cc->max_stale_value = -1;
}
break;
}
else if (!strncasecmp(token, "min-fresh", 9)) {
- if (token[9] == '=') {
+ if (token[9] == '='
+ && !apr_strtoff(&offt, token + 10, &endp, 10)
+ && endp > token + 10 && !*endp) {
cc->min_fresh = 1;
- cc->min_fresh_value = apr_atoi64(token + 10);
+ cc->min_fresh_value = offt;
}
break;
}
@@ -1096,9 +1109,11 @@ int ap_cache_control(request_rec *r, cache_control_t *cc,
case 's':
case 'S': {
if (!strncasecmp(token, "s-maxage", 8)) {
- if (token[8] == '=') {
+ if (token[8] == '='
+ && !apr_strtoff(&offt, token + 9, &endp, 10)
+ && endp > token + 9 && !*endp) {
cc->s_maxage = 1;
- cc->s_maxage_value = apr_atoi64(token + 9);
+ cc->s_maxage_value = offt;
}
break;
}
diff --git a/modules/cache/mod_cache.mak b/modules/cache/mod_cache.mak
index 2da30fb3..a89b1bc9 100644
--- a/modules/cache/mod_cache.mak
+++ b/modules/cache/mod_cache.mak
@@ -353,14 +353,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_cache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_cache.so" /d LONG_NAME="cache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_cache.so" /d LONG_NAME="cache_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_cache - Win32 Debug"
"$(INTDIR)\mod_cache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_cache.so" /d LONG_NAME="cache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_cache.so" /d LONG_NAME="cache_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cache/mod_cache_disk.mak b/modules/cache/mod_cache_disk.mak
index 1289eb30..5b4dd7aa 100644
--- a/modules/cache/mod_cache_disk.mak
+++ b/modules/cache/mod_cache_disk.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_cache_disk.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache_disk.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_cache_disk.so" /d LONG_NAME="cache_disk_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache_disk.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_cache_disk.so" /d LONG_NAME="cache_disk_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_cache_disk - Win32 Debug"
"$(INTDIR)\mod_cache_disk.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache_disk.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_cache_disk.so" /d LONG_NAME="cache_disk_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache_disk.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_cache_disk.so" /d LONG_NAME="cache_disk_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cache/mod_cache_socache.mak b/modules/cache/mod_cache_socache.mak
index f1b0ac39..7857e7ff 100644
--- a/modules/cache/mod_cache_socache.mak
+++ b/modules/cache/mod_cache_socache.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_cache_socache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache_socache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_cache_socache.so" /d LONG_NAME="cache_socache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache_socache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_cache_socache.so" /d LONG_NAME="cache_socache_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_cache_socache - Win32 Debug"
"$(INTDIR)\mod_cache_socache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache_socache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_cache_socache.so" /d LONG_NAME="cache_socache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cache_socache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_cache_socache.so" /d LONG_NAME="cache_socache_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cache/mod_file_cache.mak b/modules/cache/mod_file_cache.mak
index 5802e30a..0f54dc23 100644
--- a/modules/cache/mod_file_cache.mak
+++ b/modules/cache/mod_file_cache.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_file_cache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_file_cache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_file_cache.so" /d LONG_NAME="file_cache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_file_cache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_file_cache.so" /d LONG_NAME="file_cache_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_file_cache - Win32 Debug"
"$(INTDIR)\mod_file_cache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_file_cache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_file_cache.so" /d LONG_NAME="file_cache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_file_cache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_file_cache.so" /d LONG_NAME="file_cache_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cache/mod_socache_dbm.mak b/modules/cache/mod_socache_dbm.mak
index 03a3e535..93453f8b 100644
--- a/modules/cache/mod_socache_dbm.mak
+++ b/modules/cache/mod_socache_dbm.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_socache_dbm.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_dbm.so" /d LONG_NAME="socache_dbm_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_dbm.so" /d LONG_NAME="socache_dbm_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_socache_dbm - Win32 Debug"
"$(INTDIR)\mod_socache_dbm.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_dbm.so" /d LONG_NAME="socache_dbm_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_dbm.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_dbm.so" /d LONG_NAME="socache_dbm_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cache/mod_socache_dc.mak b/modules/cache/mod_socache_dc.mak
index 51382d91..0c4c3c08 100644
--- a/modules/cache/mod_socache_dc.mak
+++ b/modules/cache/mod_socache_dc.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_socache_dc.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_dc.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_dc.so" /d LONG_NAME="socache_dc_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_dc.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_dc.so" /d LONG_NAME="socache_dc_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_socache_dc - Win32 Debug"
"$(INTDIR)\mod_socache_dc.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_dc.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_dc.so" /d LONG_NAME="socache_dc_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_dc.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_dc.so" /d LONG_NAME="socache_dc_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cache/mod_socache_memcache.mak b/modules/cache/mod_socache_memcache.mak
index 45fd0bb2..52b7508a 100644
--- a/modules/cache/mod_socache_memcache.mak
+++ b/modules/cache/mod_socache_memcache.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_socache_memcache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_memcache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_memcache.so" /d LONG_NAME="socache_memcache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_memcache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_memcache.so" /d LONG_NAME="socache_memcache_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_socache_memcache - Win32 Debug"
"$(INTDIR)\mod_socache_memcache.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_memcache.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_memcache.so" /d LONG_NAME="socache_memcache_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_memcache.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_memcache.so" /d LONG_NAME="socache_memcache_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cache/mod_socache_shmcb.mak b/modules/cache/mod_socache_shmcb.mak
index a966d780..7081784e 100644
--- a/modules/cache/mod_socache_shmcb.mak
+++ b/modules/cache/mod_socache_shmcb.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_socache_shmcb.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_shmcb.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_shmcb.so" /d LONG_NAME="socache_shmcb_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_shmcb.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_socache_shmcb.so" /d LONG_NAME="socache_shmcb_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_socache_shmcb - Win32 Debug"
"$(INTDIR)\mod_socache_shmcb.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_shmcb.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_shmcb.so" /d LONG_NAME="socache_shmcb_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_socache_shmcb.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_socache_shmcb.so" /d LONG_NAME="socache_shmcb_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cluster/mod_heartbeat.mak b/modules/cluster/mod_heartbeat.mak
index 7a72a998..7ce22c7f 100644
--- a/modules/cluster/mod_heartbeat.mak
+++ b/modules/cluster/mod_heartbeat.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_heartbeat.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_heartbeat.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_heartbeat.so" /d LONG_NAME="heartbeat_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_heartbeat.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_heartbeat.so" /d LONG_NAME="heartbeat_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_heartbeat - Win32 Debug"
"$(INTDIR)\mod_heartbeat.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_heartbeat.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_heartbeat.so" /d LONG_NAME="heartbeat_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_heartbeat.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_heartbeat.so" /d LONG_NAME="heartbeat_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/cluster/mod_heartmonitor.mak b/modules/cluster/mod_heartmonitor.mak
index e33bc965..2d935d57 100644
--- a/modules/cluster/mod_heartmonitor.mak
+++ b/modules/cluster/mod_heartmonitor.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_heartmonitor.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_heartmonitor.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_heartmonitor.so" /d LONG_NAME="heartmonitor_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_heartmonitor.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_heartmonitor.so" /d LONG_NAME="heartmonitor_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_heartmonitor - Win32 Debug"
"$(INTDIR)\mod_heartmonitor.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_heartmonitor.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_heartmonitor.so" /d LONG_NAME="heartmonitor_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_heartmonitor.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_heartmonitor.so" /d LONG_NAME="heartmonitor_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/core/config.m4 b/modules/core/config.m4
index 8bb39a5c..94fb4a12 100644
--- a/modules/core/config.m4
+++ b/modules/core/config.m4
@@ -45,7 +45,7 @@ fi
APACHE_MODULE(so, DSO capability. This module will be automatically enabled unless you build all modules statically., , , $enable_so)
-APACHE_MODULE(watchdog, Watchdog module, , , , [
+APACHE_MODULE(watchdog, Watchdog module, , , most, [
APR_CHECK_APR_DEFINE(APR_HAS_THREADS)
if test $ac_cv_define_APR_HAS_THREADS = "no"; then
AC_MSG_WARN([mod_watchdog requires apr to be built with --enable-threads])
diff --git a/modules/core/mod_macro.mak b/modules/core/mod_macro.mak
index c7c921e9..656d96a5 100644
--- a/modules/core/mod_macro.mak
+++ b/modules/core/mod_macro.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\httpd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_macro.so" /d LONG_NAME="macro_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_macro.so" /d LONG_NAME="macro_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_macro - Win32 Debug"
"$(INTDIR)\httpd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_macro.so" /d LONG_NAME="macro_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_macro.so" /d LONG_NAME="macro_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/core/mod_watchdog.mak b/modules/core/mod_watchdog.mak
index 865da38d..6b58c6d5 100644
--- a/modules/core/mod_watchdog.mak
+++ b/modules/core/mod_watchdog.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_watchdog.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_watchdog.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_watchdog.so" /d LONG_NAME="watchdog_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_watchdog.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_watchdog.so" /d LONG_NAME="watchdog_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_watchdog - Win32 Debug"
"$(INTDIR)\mod_watchdog.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_watchdog.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_watchdog.so" /d LONG_NAME="watchdog_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_watchdog.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_watchdog.so" /d LONG_NAME="watchdog_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/database/mod_dbd.mak b/modules/database/mod_dbd.mak
index 28f86b66..5cf22653 100644
--- a/modules/database/mod_dbd.mak
+++ b/modules/database/mod_dbd.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_dbd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_dbd.so" /d LONG_NAME="dbd_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_dbd.so" /d LONG_NAME="dbd_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_dbd - Win32 Debug"
"$(INTDIR)\mod_dbd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_dbd.so" /d LONG_NAME="dbd_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_dbd.so" /d LONG_NAME="dbd_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/dav/fs/mod_dav_fs.mak b/modules/dav/fs/mod_dav_fs.mak
index 4f87cd62..5baff674 100644
--- a/modules/dav/fs/mod_dav_fs.mak
+++ b/modules/dav/fs/mod_dav_fs.mak
@@ -390,14 +390,14 @@ SOURCE=..\..\..\build\win32\httpd.rc
"$(INTDIR)\mod_dav_fs.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav_fs.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_dav_fs.so" /d LONG_NAME="dav_fs_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav_fs.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_dav_fs.so" /d LONG_NAME="dav_fs_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_dav_fs - Win32 Debug"
"$(INTDIR)\mod_dav_fs.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav_fs.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_dav_fs.so" /d LONG_NAME="dav_fs_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav_fs.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_dav_fs.so" /d LONG_NAME="dav_fs_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/dav/lock/mod_dav_lock.mak b/modules/dav/lock/mod_dav_lock.mak
index 31e54595..0ae2b6a9 100644
--- a/modules/dav/lock/mod_dav_lock.mak
+++ b/modules/dav/lock/mod_dav_lock.mak
@@ -372,14 +372,14 @@ SOURCE=..\..\..\build\win32\httpd.rc
"$(INTDIR)\mod_dav_lock.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav_lock.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_dav_lock.so" /d LONG_NAME="dav_lock_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav_lock.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_dav_lock.so" /d LONG_NAME="dav_lock_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_dav_lock - Win32 Debug"
"$(INTDIR)\mod_dav_lock.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav_lock.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_dav_lock.so" /d LONG_NAME="dav_lock_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav_lock.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_dav_lock.so" /d LONG_NAME="dav_lock_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/dav/main/mod_dav.c b/modules/dav/main/mod_dav.c
index f8bfb25d..22ff32f1 100644
--- a/modules/dav/main/mod_dav.c
+++ b/modules/dav/main/mod_dav.c
@@ -201,6 +201,12 @@ static void *dav_merge_dir_config(apr_pool_t *p, void *base, void *overrides)
return newconf;
}
+DAV_DECLARE(const char *) dav_get_provider_name(request_rec *r)
+{
+ dav_dir_conf *conf = ap_get_module_config(r->per_dir_config, &dav_module);
+ return conf ? conf->provider_name : NULL;
+}
+
static const dav_provider *dav_get_provider(request_rec *r)
{
dav_dir_conf *conf;
diff --git a/modules/dav/main/mod_dav.h b/modules/dav/main/mod_dav.h
index 96088d34..a51e7c53 100644
--- a/modules/dav/main/mod_dav.h
+++ b/modules/dav/main/mod_dav.h
@@ -665,6 +665,7 @@ DAV_DECLARE(const dav_hooks_search *) dav_get_search_hooks(request_rec *r);
DAV_DECLARE(void) dav_register_provider(apr_pool_t *p, const char *name,
const dav_provider *hooks);
DAV_DECLARE(const dav_provider *) dav_lookup_provider(const char *name);
+DAV_DECLARE(const char *) dav_get_provider_name(request_rec *r);
/* ### deprecated */
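The two mod_dav hunks above add a small public accessor, dav_get_provider_name(), which returns the DavProvider name configured for the request's directory, or NULL when none is set. As a rough illustration only (the hook, function name, and logging choice below are invented for this sketch and are not part of the change), an outside module could consume it like this:

#include "httpd.h"
#include "http_log.h"
#include "mod_dav.h"

/* Hypothetical fixup hook: log which DAV provider, if any, applies to this request. */
static int x_log_dav_provider(request_rec *r)
{
    const char *name = dav_get_provider_name(r);   /* NULL when DAV is not configured here */
    if (name) {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                      "DAV provider for %s: %s", r->uri, name);
    }
    return DECLINED;
}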
diff --git a/modules/dav/main/mod_dav.mak b/modules/dav/main/mod_dav.mak
index 5ef119a6..a107e22d 100644
--- a/modules/dav/main/mod_dav.mak
+++ b/modules/dav/main/mod_dav.mak
@@ -389,14 +389,14 @@ SOURCE=..\..\..\build\win32\httpd.rc
"$(INTDIR)\mod_dav.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_dav.so" /d LONG_NAME="dav_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_dav.so" /d LONG_NAME="dav_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_dav - Win32 Debug"
"$(INTDIR)\mod_dav.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_dav.so" /d LONG_NAME="dav_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dav.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_dav.so" /d LONG_NAME="dav_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/debugging/mod_bucketeer.mak b/modules/debugging/mod_bucketeer.mak
index aa8a00bc..0e31e944 100644
--- a/modules/debugging/mod_bucketeer.mak
+++ b/modules/debugging/mod_bucketeer.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_bucketeer.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_bucketeer.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_bucketeer.so" /d LONG_NAME="bucketeer_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_bucketeer.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_bucketeer.so" /d LONG_NAME="bucketeer_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_bucketeer - Win32 Debug"
"$(INTDIR)\mod_bucketeer.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_bucketeer.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_bucketeer.so" /d LONG_NAME="bucketeer_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_bucketeer.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_bucketeer.so" /d LONG_NAME="bucketeer_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/debugging/mod_dumpio.mak b/modules/debugging/mod_dumpio.mak
index 9cb1636b..d0943c74 100644
--- a/modules/debugging/mod_dumpio.mak
+++ b/modules/debugging/mod_dumpio.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_dumpio.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dumpio.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_dumpio.so" /d LONG_NAME="dumpio_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dumpio.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_dumpio.so" /d LONG_NAME="dumpio_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_dumpio - Win32 Debug"
"$(INTDIR)\mod_dumpio.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dumpio.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_dumpio.so" /d LONG_NAME="dumpio_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dumpio.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_dumpio.so" /d LONG_NAME="dumpio_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/echo/mod_echo.mak b/modules/echo/mod_echo.mak
index 05019f1d..6066fa70 100644
--- a/modules/echo/mod_echo.mak
+++ b/modules/echo/mod_echo.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_echo.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_echo.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_echo.so" /d LONG_NAME="echo_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_echo.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_echo.so" /d LONG_NAME="echo_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_echo - Win32 Debug"
"$(INTDIR)\mod_echo.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_echo.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_echo.so" /d LONG_NAME="echo_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_echo.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_echo.so" /d LONG_NAME="echo_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/examples/mod_case_filter.mak b/modules/examples/mod_case_filter.mak
index 928b655f..a7f04bfa 100644
--- a/modules/examples/mod_case_filter.mak
+++ b/modules/examples/mod_case_filter.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_case_filter.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_case_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_case_filter.so" /d LONG_NAME="case_filter_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_case_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_case_filter.so" /d LONG_NAME="case_filter_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_case_filter - Win32 Debug"
"$(INTDIR)\mod_case_filter.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_case_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_case_filter.so" /d LONG_NAME="case_filter_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_case_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_case_filter.so" /d LONG_NAME="case_filter_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/examples/mod_case_filter_in.mak b/modules/examples/mod_case_filter_in.mak
index 4d2fdcb1..28bb8481 100644
--- a/modules/examples/mod_case_filter_in.mak
+++ b/modules/examples/mod_case_filter_in.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_case_filter_in.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_case_filter_in.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_case_filter_in.so" /d LONG_NAME="case_filter_in_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_case_filter_in.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_case_filter_in.so" /d LONG_NAME="case_filter_in_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_case_filter_in - Win32 Debug"
"$(INTDIR)\mod_case_filter_in.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_case_filter_in.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_case_filter_in.so" /d LONG_NAME="case_filter_in_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_case_filter_in.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_case_filter_in.so" /d LONG_NAME="case_filter_in_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/examples/mod_example_hooks.mak b/modules/examples/mod_example_hooks.mak
index 5747c1d2..2c1e562e 100644
--- a/modules/examples/mod_example_hooks.mak
+++ b/modules/examples/mod_example_hooks.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_example_hooks.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_example_hooks.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_example_hooks.so" /d LONG_NAME="example_hooks_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_example_hooks.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_example_hooks.so" /d LONG_NAME="example_hooks_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_example_hooks - Win32 Debug"
"$(INTDIR)\mod_example_hooks.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_example_hooks.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_example_hooks.so" /d LONG_NAME="example_hooks_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_example_hooks.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_example_hooks.so" /d LONG_NAME="example_hooks_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/examples/mod_example_ipc.mak b/modules/examples/mod_example_ipc.mak
index a76b2800..218924e6 100644
--- a/modules/examples/mod_example_ipc.mak
+++ b/modules/examples/mod_example_ipc.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_example_ipc.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_example_ipc.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_example_ipc.so" /d LONG_NAME="example_ipc_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_example_ipc.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_example_ipc.so" /d LONG_NAME="example_ipc_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_example_ipc - Win32 Debug"
"$(INTDIR)\mod_example_ipc.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_example_ipc.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_example_ipc.so" /d LONG_NAME="example_ipc_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_example_ipc.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_example_ipc.so" /d LONG_NAME="example_ipc_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_buffer.mak b/modules/filters/mod_buffer.mak
index 521f6e74..d74bec25 100644
--- a/modules/filters/mod_buffer.mak
+++ b/modules/filters/mod_buffer.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_buffer.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_buffer.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_buffer.so" /d LONG_NAME="buffer_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_buffer.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_buffer.so" /d LONG_NAME="buffer_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_buffer - Win32 Debug"
"$(INTDIR)\mod_buffer.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_buffer.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_buffer.so" /d LONG_NAME="buffer_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_buffer.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_buffer.so" /d LONG_NAME="buffer_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_charset_lite.mak b/modules/filters/mod_charset_lite.mak
index ccfdc941..b2524706 100644
--- a/modules/filters/mod_charset_lite.mak
+++ b/modules/filters/mod_charset_lite.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_charset_lite.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_charset_lite.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_charset_lite.so" /d LONG_NAME="charset_lite_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_charset_lite.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_charset_lite.so" /d LONG_NAME="charset_lite_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_charset_lite - Win32 Debug"
"$(INTDIR)\mod_charset_lite.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_charset_lite.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_charset_lite.so" /d LONG_NAME="charset_lite_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_charset_lite.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_charset_lite.so" /d LONG_NAME="charset_lite_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_data.mak b/modules/filters/mod_data.mak
index b3b35aa5..cc73f510 100644
--- a/modules/filters/mod_data.mak
+++ b/modules/filters/mod_data.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_data.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_data.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_data.so" /d LONG_NAME="data_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_data.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_data.so" /d LONG_NAME="data_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_data - Win32 Debug"
"$(INTDIR)\mod_data.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_data.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_data.so" /d LONG_NAME="data_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_data.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_data.so" /d LONG_NAME="data_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_deflate.mak b/modules/filters/mod_deflate.mak
index 3696a7df..95792842 100644
--- a/modules/filters/mod_deflate.mak
+++ b/modules/filters/mod_deflate.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_deflate.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_deflate.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_deflate.so" /d LONG_NAME="deflate_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_deflate.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_deflate.so" /d LONG_NAME="deflate_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_deflate - Win32 Debug"
"$(INTDIR)\mod_deflate.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_deflate.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_deflate.so" /d LONG_NAME="deflate_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_deflate.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_deflate.so" /d LONG_NAME="deflate_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_ext_filter.mak b/modules/filters/mod_ext_filter.mak
index 638515b2..f03bff36 100644
--- a/modules/filters/mod_ext_filter.mak
+++ b/modules/filters/mod_ext_filter.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_ext_filter.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ext_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_ext_filter.so" /d LONG_NAME="ext_filter_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ext_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_ext_filter.so" /d LONG_NAME="ext_filter_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_ext_filter - Win32 Debug"
"$(INTDIR)\mod_ext_filter.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ext_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_ext_filter.so" /d LONG_NAME="ext_filter_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ext_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_ext_filter.so" /d LONG_NAME="ext_filter_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_filter.mak b/modules/filters/mod_filter.mak
index ab0f340c..c753d9bb 100644
--- a/modules/filters/mod_filter.mak
+++ b/modules/filters/mod_filter.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_filter.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_filter.so" /d LONG_NAME="filter_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_filter.so" /d LONG_NAME="filter_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_filter - Win32 Debug"
"$(INTDIR)\mod_filter.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_filter.so" /d LONG_NAME="filter_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_filter.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_filter.so" /d LONG_NAME="filter_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_include.c b/modules/filters/mod_include.c
index 0f3527c0..5a6271c5 100644
--- a/modules/filters/mod_include.c
+++ b/modules/filters/mod_include.c
@@ -1763,6 +1763,15 @@ static int find_file(request_rec *r, const char *directive, const char *tag,
}
/*
+ * <!--#comment blah blah blah ... -->
+ */
+static apr_status_t handle_comment(include_ctx_t *ctx, ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ return APR_SUCCESS;
+}
+
+/*
* <!--#include virtual|file="..." [onerror|virtual|file="..."] ... -->
*
* Output each file/virtual in turn until one of them returns an error.
@@ -4160,6 +4169,7 @@ static int include_post_config(apr_pool_t *p, apr_pool_t *plog,
ssi_pfn_register("endif", handle_endif);
ssi_pfn_register("fsize", handle_fsize);
ssi_pfn_register("config", handle_config);
+ ssi_pfn_register("comment", handle_comment);
ssi_pfn_register("include", handle_include);
ssi_pfn_register("flastmod", handle_flastmod);
ssi_pfn_register("printenv", handle_printenv);
diff --git a/modules/filters/mod_include.mak b/modules/filters/mod_include.mak
index 9b61e328..a9c3fedc 100644
--- a/modules/filters/mod_include.mak
+++ b/modules/filters/mod_include.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_include.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_include.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_include.so" /d LONG_NAME="include_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_include.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_include.so" /d LONG_NAME="include_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_include - Win32 Debug"
"$(INTDIR)\mod_include.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_include.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_include.so" /d LONG_NAME="include_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_include.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_include.so" /d LONG_NAME="include_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_proxy_html.mak b/modules/filters/mod_proxy_html.mak
index 4fc3a936..c3579beb 100644
--- a/modules/filters/mod_proxy_html.mak
+++ b/modules/filters/mod_proxy_html.mak
@@ -252,14 +252,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\httpd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../srclib/apr-util/include" /i "../../srclib/libxml2/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_html.so" /d LONG_NAME="proxy_html_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../srclib/apr-util/include" /i "../../srclib/libxml2/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_html.so" /d LONG_NAME="proxy_html_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_html - Win32 Debug"
"$(INTDIR)\httpd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../srclib/apr-util/include" /i "../../srclib/libxml2/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_html.so" /d LONG_NAME="proxy_html_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../srclib/apr-util/include" /i "../../srclib/libxml2/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_html.so" /d LONG_NAME="proxy_html_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_ratelimit.mak b/modules/filters/mod_ratelimit.mak
index b45a7b3f..e50e8922 100644
--- a/modules/filters/mod_ratelimit.mak
+++ b/modules/filters/mod_ratelimit.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_ratelimit.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ratelimit.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_ratelimit.so" /d LONG_NAME="ratelimit_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ratelimit.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_ratelimit.so" /d LONG_NAME="ratelimit_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_ratelimit - Win32 Debug"
"$(INTDIR)\mod_ratelimit.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ratelimit.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_ratelimit.so" /d LONG_NAME="ratelimit_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ratelimit.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_ratelimit.so" /d LONG_NAME="ratelimit_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_reflector.mak b/modules/filters/mod_reflector.mak
index 18c3e86a..d38b35c1 100644
--- a/modules/filters/mod_reflector.mak
+++ b/modules/filters/mod_reflector.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_reflector.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_reflector.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_reflector.so" /d LONG_NAME="reflector_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_reflector.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_reflector.so" /d LONG_NAME="reflector_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_reflector - Win32 Debug"
"$(INTDIR)\mod_reflector.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_reflector.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_reflector.so" /d LONG_NAME="reflector_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_reflector.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_reflector.so" /d LONG_NAME="reflector_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_reqtimeout.mak b/modules/filters/mod_reqtimeout.mak
index 1262bf46..459272e2 100644
--- a/modules/filters/mod_reqtimeout.mak
+++ b/modules/filters/mod_reqtimeout.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_reqtimeout.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_reqtimeout.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_reqtimeout.so" /d LONG_NAME="reqtimeout_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_reqtimeout.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_reqtimeout.so" /d LONG_NAME="reqtimeout_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_reqtimeout - Win32 Debug"
"$(INTDIR)\mod_reqtimeout.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_reqtimeout.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_reqtimeout.so" /d LONG_NAME="reqtimeout_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_reqtimeout.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_reqtimeout.so" /d LONG_NAME="reqtimeout_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_request.mak b/modules/filters/mod_request.mak
index 0f89a219..7ceb603b 100644
--- a/modules/filters/mod_request.mak
+++ b/modules/filters/mod_request.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_request.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_request.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_request.so" /d LONG_NAME="request_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_request.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_request.so" /d LONG_NAME="request_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_request - Win32 Debug"
"$(INTDIR)\mod_request.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_request.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_request.so" /d LONG_NAME="request_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_request.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_request.so" /d LONG_NAME="request_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_sed.mak b/modules/filters/mod_sed.mak
index 24bbda70..c997b237 100644
--- a/modules/filters/mod_sed.mak
+++ b/modules/filters/mod_sed.mak
@@ -343,14 +343,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_sed.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_sed.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_sed.so" /d LONG_NAME="sed_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_sed.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_sed.so" /d LONG_NAME="sed_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_sed - Win32 Debug"
"$(INTDIR)\mod_sed.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_sed.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_sed.so" /d LONG_NAME="sed_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_sed.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_sed.so" /d LONG_NAME="sed_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_substitute.mak b/modules/filters/mod_substitute.mak
index 040775a4..f1538e82 100644
--- a/modules/filters/mod_substitute.mak
+++ b/modules/filters/mod_substitute.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_substitute.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_substitute.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_substitute.so" /d LONG_NAME="substitute_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_substitute.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_substitute.so" /d LONG_NAME="substitute_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_substitute - Win32 Debug"
"$(INTDIR)\mod_substitute.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_substitute.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_substitute.so" /d LONG_NAME="substitute_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_substitute.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_substitute.so" /d LONG_NAME="substitute_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/mod_xml2enc.mak b/modules/filters/mod_xml2enc.mak
index fb253044..028c736b 100644
--- a/modules/filters/mod_xml2enc.mak
+++ b/modules/filters/mod_xml2enc.mak
@@ -252,14 +252,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\httpd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../srclib/apr-util/include" /i "../../srclib/libxml2/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_xml2enc.so" /d LONG_NAME="xml2enc_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../srclib/apr-util/include" /i "../../srclib/libxml2/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_xml2enc.so" /d LONG_NAME="xml2enc_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_xml2enc - Win32 Debug"
"$(INTDIR)\httpd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../srclib/apr-util/include" /i "../../srclib/libxml2/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_xml2enc.so" /d LONG_NAME="xml2enc_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\httpd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../srclib/apr-util/include" /i "../../srclib/libxml2/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_xml2enc.so" /d LONG_NAME="xml2enc_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/filters/sed0.c b/modules/filters/sed0.c
index ddc4bfed..a044f647 100644
--- a/modules/filters/sed0.c
+++ b/modules/filters/sed0.c
@@ -588,7 +588,7 @@ jtcommon:
command_errf(commands, SEDERR_SMMES, commands->linebuf);
return -1;
}
- if (text(commands, fnamebuf, &fnamebuf[APR_PATH_MAX]) == NULL) {
+ if (text(commands, fnamebuf, &fnamebuf[APR_PATH_MAX-1]) == NULL) {
command_errf(commands, SEDERR_FNTL, commands->linebuf);
return -1;
}
@@ -617,7 +617,7 @@ jtcommon:
command_errf(commands, SEDERR_SMMES, commands->linebuf);
return -1;
}
- if (text(commands, fnamebuf, &fnamebuf[APR_PATH_MAX]) == NULL) {
+ if (text(commands, fnamebuf, &fnamebuf[APR_PATH_MAX-1]) == NULL) {
command_errf(commands, SEDERR_FNTL, commands->linebuf);
return -1;
}
diff --git a/modules/filters/sed1.c b/modules/filters/sed1.c
index 739e1ce9..8f383b8b 100644
--- a/modules/filters/sed1.c
+++ b/modules/filters/sed1.c
@@ -235,6 +235,7 @@ static void copy_to_genbuf(sed_eval_t *eval, const char* sz)
if (eval->gsize < reqsize) {
grow_gen_buffer(eval, reqsize, NULL);
}
+ memcpy(eval->genbuf, sz, len + 1);
}
/*
diff --git a/modules/generators/mod_asis.mak b/modules/generators/mod_asis.mak
index 73ff6f61..f069aac1 100644
--- a/modules/generators/mod_asis.mak
+++ b/modules/generators/mod_asis.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_asis.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_asis.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_asis.so" /d LONG_NAME="asis_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_asis.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_asis.so" /d LONG_NAME="asis_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_asis - Win32 Debug"
"$(INTDIR)\mod_asis.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_asis.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_asis.so" /d LONG_NAME="asis_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_asis.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_asis.so" /d LONG_NAME="asis_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/generators/mod_autoindex.mak b/modules/generators/mod_autoindex.mak
index 86aabc67..daebb5dc 100644
--- a/modules/generators/mod_autoindex.mak
+++ b/modules/generators/mod_autoindex.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_autoindex.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_autoindex.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_autoindex.so" /d LONG_NAME="autoindex_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_autoindex.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_autoindex.so" /d LONG_NAME="autoindex_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_autoindex - Win32 Debug"
"$(INTDIR)\mod_autoindex.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_autoindex.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_autoindex.so" /d LONG_NAME="autoindex_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_autoindex.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_autoindex.so" /d LONG_NAME="autoindex_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/generators/mod_cgi.mak b/modules/generators/mod_cgi.mak
index 98ac24de..bd5e7e70 100644
--- a/modules/generators/mod_cgi.mak
+++ b/modules/generators/mod_cgi.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_cgi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cgi.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_cgi.so" /d LONG_NAME="cgi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cgi.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_cgi.so" /d LONG_NAME="cgi_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_cgi - Win32 Debug"
"$(INTDIR)\mod_cgi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cgi.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_cgi.so" /d LONG_NAME="cgi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cgi.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_cgi.so" /d LONG_NAME="cgi_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/generators/mod_info.mak b/modules/generators/mod_info.mak
index cec35c52..b0de8a1e 100644
--- a/modules/generators/mod_info.mak
+++ b/modules/generators/mod_info.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_info.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_info.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_info.so" /d LONG_NAME="info_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_info.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_info.so" /d LONG_NAME="info_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_info - Win32 Debug"
"$(INTDIR)\mod_info.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_info.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_info.so" /d LONG_NAME="info_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_info.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_info.so" /d LONG_NAME="info_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/generators/mod_status.mak b/modules/generators/mod_status.mak
index 37f3b63b..168bbc8b 100644
--- a/modules/generators/mod_status.mak
+++ b/modules/generators/mod_status.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_status.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_status.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_status.so" /d LONG_NAME="status_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_status.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_status.so" /d LONG_NAME="status_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_status - Win32 Debug"
"$(INTDIR)\mod_status.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_status.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_status.so" /d LONG_NAME="status_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_status.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_status.so" /d LONG_NAME="status_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/http/http_core.c b/modules/http/http_core.c
index 345de810..57404e66 100644
--- a/modules/http/http_core.c
+++ b/modules/http/http_core.c
@@ -148,9 +148,9 @@ static int ap_process_http_async_connection(conn_rec *c)
c->keepalive = AP_CONN_UNKNOWN;
/* process the request if it was read without error */
- ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
if (r->status == HTTP_OK) {
cs->state = CONN_STATE_HANDLER;
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
ap_process_async_request(r);
/* After the call to ap_process_request, the
* request pool may have been deleted. We set
@@ -203,10 +203,10 @@ static int ap_process_http_sync_connection(conn_rec *c)
c->keepalive = AP_CONN_UNKNOWN;
/* process the request if it was read without error */
- ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
if (r->status == HTTP_OK) {
if (cs)
cs->state = CONN_STATE_HANDLER;
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
ap_process_request(r);
/* After the call to ap_process_request, the
* request pool will have been deleted. We set
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
index 9aa0549b..582d87e3 100644
--- a/modules/http/http_protocol.c
+++ b/modules/http/http_protocol.c
@@ -146,7 +146,27 @@ static const char * const status_lines[RESPONSE_CODES] =
"429 Too Many Requests",
NULL, /* 430 */
"431 Request Header Fields Too Large",
-#define LEVEL_500 71
+ NULL, /* 432 */
+ NULL, /* 433 */
+ NULL, /* 434 */
+ NULL, /* 435 */
+ NULL, /* 436 */
+ NULL, /* 437 */
+ NULL, /* 438 */
+ NULL, /* 439 */
+ NULL, /* 440 */
+ NULL, /* 441 */
+ NULL, /* 442 */
+ NULL, /* 443 */
+ NULL, /* 444 */
+ NULL, /* 445 */
+ NULL, /* 446 */
+ NULL, /* 447 */
+ NULL, /* 448 */
+ NULL, /* 449 */
+ NULL, /* 450 */
+ "451 Unavailable For Legal Reasons",
+#define LEVEL_500 91
"500 Internal Server Error",
"501 Not Implemented",
"502 Bad Gateway",
@@ -1298,6 +1318,12 @@ static const char *get_canned_error_string(int status,
"request as the requested host name does not match\n"
"the Server Name Indication (SNI) in use for this\n"
"connection.</p>\n");
+ case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS:
+ s1 = apr_pstrcat(p,
+ "<p>Access to ", ap_escape_html(r->pool, r->uri),
+ "\nhas been denied for legal reasons.<br />\n",
+ NULL);
+ return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
default: /* HTTP_INTERNAL_SERVER_ERROR */
/*
* This comparison to expose error-notes could be modified to
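The http_protocol.c changes above extend the status_lines table through 451 (which is why LEVEL_500 moves from 71 to 91) and add a canned error body for HTTP_UNAVAILABLE_FOR_LEGAL_REASONS. Purely as an illustration (the handler name and URI prefix are made up, not part of the patch), a module can now return that status and httpd will serve the canned 451 response:

#include <string.h>
#include "httpd.h"
#include "http_protocol.h"

/* Hypothetical handler: refuse a subtree for legal reasons with a 451. */
static int x_legal_block_handler(request_rec *r)
{
    if (strncmp(r->uri, "/blocked/", 9) == 0) {
        return HTTP_UNAVAILABLE_FOR_LEGAL_REASONS;  /* "451 Unavailable For Legal Reasons" */
    }
    return DECLINED;
}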
diff --git a/modules/http/mod_mime.mak b/modules/http/mod_mime.mak
index 50fc5263..14d106f4 100644
--- a/modules/http/mod_mime.mak
+++ b/modules/http/mod_mime.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_mime.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_mime - Win32 Debug"
"$(INTDIR)\mod_mime.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_mime.so" /d LONG_NAME="mime_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/http2/NWGNUmakefile b/modules/http2/NWGNUmakefile
index e97efcaa..d4a51ed3 100644
--- a/modules/http2/NWGNUmakefile
+++ b/modules/http2/NWGNUmakefile
@@ -153,7 +153,7 @@ XDCDATA =
#
TARGET_nlm = \
$(OBJDIR)/mod_http2.nlm \
- $(OBJDIR)/mod_http2.nlm \
+ $(OBJDIR)/proxyht2.nlm \
$(EOLIST)
#
diff --git a/modules/http2/NWGNUmod_http2 b/modules/http2/NWGNUmod_http2
index 2a8a0fbb..e9c48a40 100644
--- a/modules/http2/NWGNUmod_http2
+++ b/modules/http2/NWGNUmod_http2
@@ -185,6 +185,7 @@ TARGET_lib = \
#
FILES_nlm_objs = \
$(OBJDIR)/h2_alt_svc.o \
+ $(OBJDIR)/h2_bucket_beam.o \
$(OBJDIR)/h2_bucket_eoc.o \
$(OBJDIR)/h2_bucket_eos.o \
$(OBJDIR)/h2_config.o \
@@ -194,9 +195,6 @@ FILES_nlm_objs = \
$(OBJDIR)/h2_filter.o \
$(OBJDIR)/h2_from_h1.o \
$(OBJDIR)/h2_h2.o \
- $(OBJDIR)/h2_int_queue.o \
- $(OBJDIR)/h2_io.o \
- $(OBJDIR)/h2_io_set.o \
$(OBJDIR)/h2_mplx.o \
$(OBJDIR)/h2_ngn_shed.o \
$(OBJDIR)/h2_push.o \
@@ -206,8 +204,6 @@ FILES_nlm_objs = \
$(OBJDIR)/h2_stream.o \
$(OBJDIR)/h2_switch.o \
$(OBJDIR)/h2_task.o \
- $(OBJDIR)/h2_task_input.o \
- $(OBJDIR)/h2_task_output.o \
$(OBJDIR)/h2_util.o \
$(OBJDIR)/h2_worker.o \
$(OBJDIR)/h2_workers.o \
@@ -363,7 +359,7 @@ $(OBJDIR)/mod_http2.imp : NWGNUmod_http2
@echo $(DL) h2_ihash_clear,$(DL) >> $@
@echo $(DL) h2_ihash_count,$(DL) >> $@
@echo $(DL) h2_ihash_create,$(DL) >> $@
- @echo $(DL) h2_ihash_is_empty,$(DL) >> $@
+ @echo $(DL) h2_ihash_empty,$(DL) >> $@
@echo $(DL) h2_ihash_iter,$(DL) >> $@
@echo $(DL) h2_ihash_remove,$(DL) >> $@
@echo $(DL) h2_iq_add,$(DL) >> $@
@@ -371,8 +367,10 @@ $(OBJDIR)/mod_http2.imp : NWGNUmod_http2
@echo $(DL) h2_iq_remove,$(DL) >> $@
@echo $(DL) h2_log2,$(DL) >> $@
@echo $(DL) h2_proxy_res_ignore_header,$(DL) >> $@
- @echo $(DL) h2_request_create,$(DL) >> $@
- @echo $(DL) h2_request_make,$(DL) >> $@
+ @echo $(DL) h2_headers_add_h1,$(DL) >> $@
+ @echo $(DL) h2_req_create,$(DL) >> $@
+ @echo $(DL) h2_req_createn,$(DL) >> $@
+ @echo $(DL) h2_req_make,$(DL) >> $@
@echo $(DL) h2_util_camel_case_header,$(DL) >> $@
@echo $(DL) h2_util_frame_print,$(DL) >> $@
@echo $(DL) h2_util_ngheader_make_req,$(DL) >> $@
@@ -391,6 +389,7 @@ $(OBJDIR)/mod_http2.imp : NWGNUmod_http2
@echo $(DL) nghttp2_session_callbacks_set_send_callback,$(DL) >> $@
@echo $(DL) nghttp2_session_client_new2,$(DL) >> $@
@echo $(DL) nghttp2_session_consume,$(DL) >> $@
+ @echo $(DL) nghttp2_session_consume_connection,$(DL) >> $@
@echo $(DL) nghttp2_session_del,$(DL) >> $@
@echo $(DL) nghttp2_session_get_remote_settings,$(DL) >> $@
@echo $(DL) nghttp2_session_get_stream_user_data,$(DL) >> $@
diff --git a/modules/http2/NWGNUproxyht2 b/modules/http2/NWGNUproxyht2
new file mode 100644
index 00000000..7153d084
--- /dev/null
+++ b/modules/http2/NWGNUproxyht2
@@ -0,0 +1,287 @@
+#
+# This Makefile requires the environment var NGH2SRC
+# pointing to the base directory of the nghttp2 source tree.
+#
+
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(NGH2SRC)/lib/includes \
+ $(STDMOD)/proxy \
+ $(SERVER)/mpm/NetWare \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ -L$(OBJDIR) \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxyht2
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Proxy module
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = $(NLM_NAME)
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_proxy_http2.o \
+ $(OBJDIR)/h2_proxy_session.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Libc \
+ Apache2 \
+ mod_proxy \
+ mod_http2 \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ @$(OBJDIR)/mod_http2.imp \
+ ap_proxy_acquire_connection \
+ ap_proxy_canon_netloc \
+ ap_proxy_canonenc \
+ ap_proxy_connect_backend \
+ ap_proxy_connection_create \
+ ap_proxy_cookie_reverse_map \
+ ap_proxy_determine_connection \
+ ap_proxy_location_reverse_map \
+ ap_proxy_port_of_scheme \
+ ap_proxy_release_connection \
+ ap_proxy_ssl_connection_cleanup \
+ ap_sock_disable_nagle \
+ proxy_hook_canon_handler \
+ proxy_hook_scheme_handler \
+ proxy_module \
+ proxy_run_detach_backend \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ proxy_http2_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs :=
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+clean ::
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
index e94da222..a77ad808 100644
--- a/modules/http2/config2.m4
+++ b/modules/http2/config2.m4
@@ -20,6 +20,7 @@ dnl # list of module object files
http2_objs="dnl
mod_http2.lo dnl
h2_alt_svc.lo dnl
+h2_bucket_beam.lo dnl
h2_bucket_eoc.lo dnl
h2_bucket_eos.lo dnl
h2_config.lo dnl
@@ -29,9 +30,6 @@ h2_ctx.lo dnl
h2_filter.lo dnl
h2_from_h1.lo dnl
h2_h2.lo dnl
-h2_int_queue.lo dnl
-h2_io.lo dnl
-h2_io_set.lo dnl
h2_mplx.lo dnl
h2_ngn_shed.lo dnl
h2_push.lo dnl
@@ -41,8 +39,6 @@ h2_session.lo dnl
h2_stream.lo dnl
h2_switch.lo dnl
h2_task.lo dnl
-h2_task_input.lo dnl
-h2_task_output.lo dnl
h2_util.lo dnl
h2_worker.lo dnl
h2_workers.lo dnl
@@ -104,7 +100,6 @@ AC_DEFUN([APACHE_CHECK_NGHTTP2],[
pkglookup="`$PKGCONFIG --cflags-only-I libnghttp2`"
APR_ADDTO(CPPFLAGS, [$pkglookup])
APR_ADDTO(MOD_CFLAGS, [$pkglookup])
- APR_ADDTO(ab_CFLAGS, [$pkglookup])
pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-L libnghttp2`"
APR_ADDTO(LDFLAGS, [$pkglookup])
APR_ADDTO(MOD_LDFLAGS, [$pkglookup])
@@ -119,7 +114,6 @@ AC_DEFUN([APACHE_CHECK_NGHTTP2],[
if test "x$ap_nghttp2_base" != "x" -a "x$ap_nghttp2_found" = "x"; then
APR_ADDTO(CPPFLAGS, [-I$ap_nghttp2_base/include])
APR_ADDTO(MOD_CFLAGS, [-I$ap_nghttp2_base/include])
- APR_ADDTO(ab_CFLAGS, [-I$ap_nghttp2_base/include])
APR_ADDTO(LDFLAGS, [-L$ap_nghttp2_base/lib])
APR_ADDTO(MOD_LDFLAGS, [-L$ap_nghttp2_base/lib])
if test "x$ap_platform_runtime_link_flag" != "x"; then
@@ -144,9 +138,6 @@ AC_DEFUN([APACHE_CHECK_NGHTTP2],[
ap_nghttp2_libs="${ap_nghttp2_libs:--lnghttp2} `$apr_config --libs`"
APR_ADDTO(MOD_LDFLAGS, [$ap_nghttp2_libs])
APR_ADDTO(LIBS, [$ap_nghttp2_libs])
- APR_SETVAR(ab_LDFLAGS, [$MOD_LDFLAGS])
- APACHE_SUBST(ab_CFLAGS)
- APACHE_SUBST(ab_LDFLAGS)
dnl Run library and function checks
liberrors=""
@@ -206,6 +197,29 @@ is usually linked shared and requires loading. ], $http2_objs, , most, [
+dnl # list of module object files
+proxy_http2_objs="dnl
+mod_proxy_http2.lo dnl
+h2_proxy_session.lo dnl
+h2_proxy_util.lo dnl
+"
+
+dnl # hook module into the Autoconf mechanism (--enable-proxy_http2)
+APACHE_MODULE(proxy_http2, [HTTP/2 proxy module. This module requires a libnghttp2 installation.
+See --with-nghttp2 on how to manage non-standard locations. Also requires --enable-proxy.], $proxy_http2_objs, , no, [
+ APACHE_CHECK_NGHTTP2
+ if test "$ac_cv_nghttp2" = "yes" ; then
+ if test "x$enable_http2" = "xshared"; then
+ # The only symbol which needs to be exported is the module
+ # structure, so ask libtool to hide everything else:
+ APR_ADDTO(MOD_PROXY_HTTP2_LDADD, [-export-symbols-regex proxy_http2_module])
+ fi
+ else
+ enable_proxy_http2=no
+ fi
+], proxy)
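+
+dnl # Illustrative note (not part of the generated build logic): with the
+dnl # hunk above applied, a build including this proxy module would typically
+dnl # be configured along these lines; the nghttp2 prefix is only an example:
+dnl #
+dnl #   ./configure --enable-proxy --enable-proxy_http2 \
+dnl #               --with-nghttp2=/path/to/nghttp2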
+
+
dnl # end of module specific part
APACHE_MODPATH_FINISH
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
index acb79cd2..9075b00a 100644
--- a/modules/http2/h2.h
+++ b/modules/http2/h2.h
@@ -149,6 +149,9 @@ struct h2_response {
const char *sos_filter;
};
+typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
+
+typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx);
/* Note key to attach connection task id to conn_rec/request_rec instances */
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
new file mode 100644
index 00000000..cf2cb84d
--- /dev/null
+++ b/modules/http2/h2_bucket_beam.c
@@ -0,0 +1,1015 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+#include <apr_buckets.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <httpd.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_util.h"
+#include "h2_bucket_beam.h"
+
+static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy);
+
+#define H2_BPROXY_NEXT(e) APR_RING_NEXT((e), link)
+#define H2_BPROXY_PREV(e) APR_RING_PREV((e), link)
+#define H2_BPROXY_REMOVE(e) APR_RING_REMOVE((e), link)
+
+#define H2_BPROXY_LIST_INIT(b) APR_RING_INIT(&(b)->list, h2_beam_proxy, link);
+#define H2_BPROXY_LIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, h2_beam_proxy, link)
+#define H2_BPROXY_LIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, h2_beam_proxy, link)
+#define H2_BPROXY_LIST_FIRST(b) APR_RING_FIRST(&(b)->list)
+#define H2_BPROXY_LIST_LAST(b) APR_RING_LAST(&(b)->list)
+#define H2_BPROXY_LIST_INSERT_HEAD(b, e) do { \
+ h2_beam_proxy *ap__b = (e); \
+ APR_RING_INSERT_HEAD(&(b)->list, ap__b, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_INSERT_TAIL(b, e) do { \
+ h2_beam_proxy *ap__b = (e); \
+ APR_RING_INSERT_TAIL(&(b)->list, ap__b, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_CONCAT(a, b) do { \
+ APR_RING_CONCAT(&(a)->list, &(b)->list, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_PREPEND(a, b) do { \
+ APR_RING_PREPEND(&(a)->list, &(b)->list, h2_beam_proxy, link); \
+ } while (0)
+
+
+/*******************************************************************************
+ * beam bucket with reference to beam and bucket it represents
+ ******************************************************************************/
+
+const apr_bucket_type_t h2_bucket_type_beam;
+
+#define H2_BUCKET_IS_BEAM(e) (e->type == &h2_bucket_type_beam)
+
+struct h2_beam_proxy {
+ apr_bucket_refcount refcount;
+ APR_RING_ENTRY(h2_beam_proxy) link;
+ h2_bucket_beam *beam;
+ apr_bucket *bred;
+ apr_size_t n;
+};
+
+static const char Dummy = '\0';
+
+static apr_status_t beam_bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ h2_beam_proxy *d = b->data;
+ if (d->bred) {
+ const char *data;
+ apr_status_t status = apr_bucket_read(d->bred, &data, len, block);
+ if (status == APR_SUCCESS) {
+ *str = data + b->start;
+ *len = b->length;
+ }
+ return status;
+ }
+ *str = &Dummy;
+ *len = 0;
+ return APR_ECONNRESET;
+}
+
+static void beam_bucket_destroy(void *data)
+{
+ h2_beam_proxy *d = data;
+
+ if (apr_bucket_shared_destroy(d)) {
+ /* When the beam gets destroyed before this bucket, it will
+ * NULLify its reference here. This is not protected by a mutex,
+ * so it will not help with race conditions.
+ * But it lets us shut down memory pools with circular beam
+ * references. */
+ if (d->beam) {
+ h2_beam_emitted(d->beam, d);
+ }
+ apr_bucket_free(d);
+ }
+}
+
+static apr_bucket * h2_beam_bucket_make(apr_bucket *b,
+ h2_bucket_beam *beam,
+ apr_bucket *bred, apr_size_t n)
+{
+ h2_beam_proxy *d;
+
+ d = apr_bucket_alloc(sizeof(*d), b->list);
+ H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d);
+ d->beam = beam;
+ d->bred = bred;
+ d->n = n;
+
+ b = apr_bucket_shared_make(b, d, 0, bred? bred->length : 0);
+ b->type = &h2_bucket_type_beam;
+
+ return b;
+}
+
+static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
+ apr_bucket *bred,
+ apr_bucket_alloc_t *list,
+ apr_size_t n)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ return h2_beam_bucket_make(b, beam, bred, n);
+}
+
+/*static apr_status_t beam_bucket_setaside(apr_bucket *b, apr_pool_t *pool)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_beam_proxy *d = b->data;
+ if (d->bred) {
+ const char *data;
+ apr_size_t len;
+
+ status = apr_bucket_read(d->bred, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ b = apr_bucket_heap_make(b, (char *)data + b->start, b->length, NULL);
+ if (b == NULL) {
+ return APR_ENOMEM;
+ }
+ }
+ }
+ return status;
+}*/
+
+const apr_bucket_type_t h2_bucket_type_beam = {
+ "BEAM", 5, APR_BUCKET_DATA,
+ beam_bucket_destroy,
+ beam_bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_shared_split,
+ apr_bucket_shared_copy
+};
+
+/*******************************************************************************
+ * h2_blist, a brigade without allocations
+ ******************************************************************************/
+
+apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ h2_blist *bl)
+{
+ apr_size_t off = 0;
+ const char *sp = "";
+ apr_bucket *b;
+
+ if (bl) {
+ memset(buffer, 0, bmax--);
+ off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
+ for (b = H2_BLIST_FIRST(bl);
+ bmax && (b != H2_BLIST_SENTINEL(bl));
+ b = APR_BUCKET_NEXT(b)) {
+
+ off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
+ sp = " ";
+ }
+ off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
+ }
+ return off;
+}
+
+
+
+/*******************************************************************************
+ * bucket beam that can transport buckets across threads
+ ******************************************************************************/
+
+static apr_status_t enter_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
+{
+ h2_beam_mutex_enter *enter = beam->m_enter;
+ if (enter) {
+ void *ctx = beam->m_ctx;
+ if (ctx) {
+ return enter(ctx, pbl);
+ }
+ }
+ pbl->mutex = NULL;
+ pbl->leave = NULL;
+ return APR_SUCCESS;
+}
+
+static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
+{
+ if (pbl->leave) {
+ pbl->leave(pbl->leave_ctx, pbl->mutex);
+ }
+}
+
+static apr_off_t calc_buffered(h2_bucket_beam *beam)
+{
+ apr_off_t len = 0;
+ apr_bucket *b;
+ for (b = H2_BLIST_FIRST(&beam->red);
+ b != H2_BLIST_SENTINEL(&beam->red);
+ b = APR_BUCKET_NEXT(b)) {
+ if (b->length == ((apr_size_t)-1)) {
+ /* do not count */
+ }
+ else if (APR_BUCKET_IS_FILE(b)) {
+ /* if unread, has no real mem footprint. how to test? */
+ }
+ else {
+ len += b->length;
+ }
+ }
+ return len;
+}
+
+static void r_purge_reds(h2_bucket_beam *beam)
+{
+ apr_bucket *bred;
+ /* delete all red buckets in purge brigade, needs to be called
+ * from red thread only */
+ while (!H2_BLIST_EMPTY(&beam->purge)) {
+ bred = H2_BLIST_FIRST(&beam->purge);
+ apr_bucket_delete(bred);
+ }
+}
+
+static apr_size_t calc_space_left(h2_bucket_beam *beam)
+{
+ if (beam->max_buf_size > 0) {
+ apr_off_t len = calc_buffered(beam);
+ return (beam->max_buf_size > len? (beam->max_buf_size - len) : 0);
+ }
+ return APR_SIZE_MAX;
+}
+
+static apr_status_t wait_cond(h2_bucket_beam *beam, apr_thread_mutex_t *lock)
+{
+ if (beam->timeout > 0) {
+ return apr_thread_cond_timedwait(beam->m_cond, lock, beam->timeout);
+ }
+ else {
+ return apr_thread_cond_wait(beam->m_cond, lock);
+ }
+}
+
+static apr_status_t r_wait_space(h2_bucket_beam *beam, apr_read_type_e block,
+ h2_beam_lock *pbl, apr_off_t *premain)
+{
+ *premain = calc_space_left(beam);
+ while (!beam->aborted && *premain <= 0
+ && (block == APR_BLOCK_READ) && pbl->mutex) {
+ apr_status_t status = wait_cond(beam, pbl->mutex);
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ return status;
+ }
+ r_purge_reds(beam);
+ *premain = calc_space_left(beam);
+ }
+ return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
+}
+
+static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
+{
+ h2_beam_lock bl;
+ apr_bucket *b, *next;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ /* even when beam buckets are split, only the one where
+ * refcount drops to 0 will call us */
+ H2_BPROXY_REMOVE(proxy);
+ /* invoked from green thread, the last beam bucket for the red
+ * bucket bred is about to be destroyed.
+ * remove it from the hold, where it should be now */
+ if (proxy->bred) {
+ for (b = H2_BLIST_FIRST(&beam->hold);
+ b != H2_BLIST_SENTINEL(&beam->hold);
+ b = APR_BUCKET_NEXT(b)) {
+ if (b == proxy->bred) {
+ break;
+ }
+ }
+ if (b != H2_BLIST_SENTINEL(&beam->hold)) {
+ /* bucket is in hold as it should be, mark this one
+ * and all before it for purging. We might have placed meta
+ * buckets without a green proxy into the hold before it;
+ * those get scheduled for purging now as well. */
+ for (b = H2_BLIST_FIRST(&beam->hold);
+ b != H2_BLIST_SENTINEL(&beam->hold);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (b == proxy->bred) {
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->purge, b);
+ break;
+ }
+ else if (APR_BUCKET_IS_METADATA(b)) {
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->purge, b);
+ }
+ else {
+ /* another data bucket before this one in hold. this
+ * is normal since DATA buckets need not be destroyed
+ * in order */
+ }
+ }
+
+ proxy->bred = NULL;
+ }
+ else {
+ /* it should be there unless we screwed up */
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->red_pool,
+ APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
+ "in hold, n=%d", beam->id, beam->tag,
+ (int)proxy->n);
+ AP_DEBUG_ASSERT(!proxy->bred);
+ }
+ }
+ /* notify anyone waiting on space to become available */
+ if (!bl.mutex) {
+ r_purge_reds(beam);
+ }
+ else if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ leave_yellow(beam, &bl);
+ }
+}
+
+static void report_consumption(h2_bucket_beam *beam, int force)
+{
+ if (force || beam->received_bytes != beam->reported_consumed_bytes) {
+ if (beam->consumed_fn) {
+ beam->consumed_fn(beam->consumed_ctx, beam, beam->received_bytes
+ - beam->reported_consumed_bytes);
+ }
+ beam->reported_consumed_bytes = beam->received_bytes;
+ }
+}
+
+static void report_production(h2_bucket_beam *beam, int force)
+{
+ if (force || beam->sent_bytes != beam->reported_produced_bytes) {
+ if (beam->produced_fn) {
+ beam->produced_fn(beam->produced_ctx, beam, beam->sent_bytes
+ - beam->reported_produced_bytes);
+ }
+ beam->reported_produced_bytes = beam->sent_bytes;
+ }
+}
+
+static void h2_blist_cleanup(h2_blist *bl)
+{
+ apr_bucket *e;
+
+ while (!H2_BLIST_EMPTY(bl)) {
+ e = H2_BLIST_FIRST(bl);
+ apr_bucket_delete(e);
+ }
+}
+
+static apr_status_t beam_close(h2_bucket_beam *beam)
+{
+ if (!beam->closed) {
+ beam->closed = 1;
+ if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t beam_cleanup(void *data)
+{
+ h2_bucket_beam *beam = data;
+
+ beam_close(beam);
+ r_purge_reds(beam);
+ h2_blist_cleanup(&beam->red);
+ report_consumption(beam, 0);
+ while (!H2_BPROXY_LIST_EMPTY(&beam->proxies)) {
+ h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies);
+ H2_BPROXY_REMOVE(proxy);
+ proxy->beam = NULL;
+ proxy->bred = NULL;
+ }
+ h2_blist_cleanup(&beam->purge);
+ h2_blist_cleanup(&beam->hold);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam)
+{
+ apr_pool_cleanup_kill(beam->red_pool, beam, beam_cleanup);
+ return beam_cleanup(beam);
+}
+
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *red_pool,
+ int id, const char *tag,
+ apr_size_t max_buf_size)
+{
+ h2_bucket_beam *beam;
+ apr_status_t status = APR_SUCCESS;
+
+ beam = apr_pcalloc(red_pool, sizeof(*beam));
+ if (!beam) {
+ return APR_ENOMEM;
+ }
+
+ beam->id = id;
+ beam->tag = tag;
+ H2_BLIST_INIT(&beam->red);
+ H2_BLIST_INIT(&beam->hold);
+ H2_BLIST_INIT(&beam->purge);
+ H2_BPROXY_LIST_INIT(&beam->proxies);
+ beam->red_pool = red_pool;
+ beam->max_buf_size = max_buf_size;
+
+ apr_pool_pre_cleanup_register(red_pool, beam, beam_cleanup);
+ *pbeam = beam;
+
+ return status;
+}
+
+void h2_beam_buffer_size_set(h2_bucket_beam *beam, apr_size_t buffer_size)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->max_buf_size = buffer_size;
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+ apr_size_t buffer_size = 0;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ buffer_size = beam->max_buf_size;
+ leave_yellow(beam, &bl);
+ }
+ return buffer_size;
+}
+
+void h2_beam_mutex_set(h2_bucket_beam *beam,
+ h2_beam_mutex_enter m_enter,
+ apr_thread_cond_t *cond,
+ void *m_ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->m_enter = m_enter;
+ beam->m_ctx = m_ctx;
+ beam->m_cond = cond;
+ leave_yellow(beam, &bl);
+ }
+}
+
+void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->timeout = timeout;
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+ apr_interval_time_t timeout = 0;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ timeout = beam->timeout;
+ leave_yellow(beam, &bl);
+ }
+ return timeout;
+}
+
+void h2_beam_abort(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_reds(beam);
+ h2_blist_cleanup(&beam->red);
+ beam->aborted = 1;
+ report_consumption(beam, 0);
+ if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_status_t h2_beam_close(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_reds(beam);
+ beam_close(beam);
+ report_consumption(beam, 0);
+ leave_yellow(beam, &bl);
+ }
+ return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
+}
+
+apr_status_t h2_beam_shutdown(h2_bucket_beam *beam, apr_read_type_e block,
+ int clear_buffers)
+{
+ apr_status_t status;
+ h2_beam_lock bl;
+
+ if ((status = enter_yellow(beam, &bl)) == APR_SUCCESS) {
+ if (clear_buffers) {
+ r_purge_reds(beam);
+ h2_blist_cleanup(&beam->red);
+ }
+ beam_close(beam);
+
+ while (status == APR_SUCCESS
+ && (!H2_BPROXY_LIST_EMPTY(&beam->proxies)
+ || (beam->green && !APR_BRIGADE_EMPTY(beam->green)))) {
+ if (block == APR_NONBLOCK_READ || !bl.mutex) {
+ status = APR_EAGAIN;
+ break;
+ }
+ if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ status = wait_cond(beam, bl.mutex);
+ }
+ leave_yellow(beam, &bl);
+ }
+ return status;
+}
+
+static apr_status_t append_bucket(h2_bucket_beam *beam,
+ apr_bucket *bred,
+ apr_read_type_e block,
+ apr_pool_t *pool,
+ h2_beam_lock *pbl)
+{
+ const char *data;
+ apr_size_t len;
+ apr_off_t space_left = 0;
+ apr_status_t status;
+
+ if (APR_BUCKET_IS_METADATA(bred)) {
+ if (APR_BUCKET_IS_EOS(bred)) {
+ beam->closed = 1;
+ }
+ APR_BUCKET_REMOVE(bred);
+ H2_BLIST_INSERT_TAIL(&beam->red, bred);
+ return APR_SUCCESS;
+ }
+ else if (APR_BUCKET_IS_FILE(bred)) {
+ /* file bucket lengths do not really count */
+ }
+ else {
+ space_left = calc_space_left(beam);
+ if (space_left > 0 && bred->length == ((apr_size_t)-1)) {
+ const char *data;
+ status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (space_left < bred->length) {
+ status = r_wait_space(beam, block, pbl, &space_left);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ if (space_left <= 0) {
+ return APR_EAGAIN;
+ }
+ }
+ /* space available, maybe need bucket split */
+ }
+
+
+ /* The fundamental problem is that reading a red bucket from
+ * a green thread is a total NO GO, because the bucket might use
+ * its pool/bucket_alloc from a foreign thread and that will
+ * corrupt memory. */
+ status = APR_ENOTIMPL;
+ if (beam->closed && bred->length > 0) {
+ status = APR_EOF;
+ }
+ else if (APR_BUCKET_IS_TRANSIENT(bred)) {
+ /* this takes care of transient buckets and converts them
+ * into heap ones. Other bucket types might or might not be
+ * affected by this. */
+ status = apr_bucket_setaside(bred, pool);
+ }
+ else if (APR_BUCKET_IS_HEAP(bred)) {
+ /* For heap buckets read from a green thread is fine. The
+ * data will be there and live until the bucket itself is
+ * destroyed. */
+ status = APR_SUCCESS;
+ }
+ else if (APR_BUCKET_IS_POOL(bred)) {
+ /* pool buckets are bastards that register at pool cleanup
+ * to morph themselves into heap buckets. That may happen anytime,
+ * even after the bucket data pointer has been read. So at
+ * any time inside the green thread, the pool bucket memory
+ * may disappear. yikes. */
+ status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ apr_bucket_heap_make(bred, data, len, NULL);
+ }
+ }
+ else if (APR_BUCKET_IS_FILE(bred)) {
+ /* For file buckets the problem is their internal readpool that
+ * is used on the first read to allocate buffer/mmap.
+ * Since setting aside a file bucket will de-register the
+ * file cleanup function from the previous pool, we need to
+ * call that from a red thread.
+ * Additionally, we allow callbacks to prevent beaming file
+ * handles across. The use case for this is to limit the number
+ * of open file handles and rather use a less efficient beam
+ * transport. */
+ apr_file_t *fd = ((apr_bucket_file *)bred->data)->fd;
+ int can_beam = 1;
+ if (beam->last_beamed != fd && beam->can_beam_fn) {
+ can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
+ }
+ if (can_beam) {
+ beam->last_beamed = fd;
+ status = apr_bucket_setaside(bred, pool);
+ }
+ /* else: enter ENOTIMPL case below */
+ }
+
+ if (status == APR_ENOTIMPL) {
+ /* we have no knowledge about the internals of this bucket,
+ * but hope that after read, its data stays immutable for the
+ * lifetime of the bucket. (see pool bucket handling above for
+ * a counter example).
+ * We do the read while in a red thread, so that the bucket may
+ * use pools/allocators safely. */
+ if (space_left < APR_BUCKET_BUFF_SIZE) {
+ space_left = APR_BUCKET_BUFF_SIZE;
+ }
+ if (space_left < bred->length) {
+ apr_bucket_split(bred, space_left);
+ }
+ status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ status = apr_bucket_setaside(bred, pool);
+ }
+ }
+
+ if (status != APR_SUCCESS && status != APR_ENOTIMPL) {
+ return status;
+ }
+
+ APR_BUCKET_REMOVE(bred);
+ H2_BLIST_INSERT_TAIL(&beam->red, bred);
+ beam->sent_bytes += bred->length;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_beam_send(h2_bucket_beam *beam,
+ apr_bucket_brigade *red_brigade,
+ apr_read_type_e block)
+{
+ apr_bucket *bred;
+ apr_status_t status = APR_SUCCESS;
+ h2_beam_lock bl;
+
+ /* Called from the red thread to add buckets to the beam */
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_reds(beam);
+
+ if (beam->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else if (red_brigade) {
+ int force_report = !APR_BRIGADE_EMPTY(red_brigade);
+ while (!APR_BRIGADE_EMPTY(red_brigade)
+ && status == APR_SUCCESS) {
+ bred = APR_BRIGADE_FIRST(red_brigade);
+ status = append_bucket(beam, bred, block, beam->red_pool, &bl);
+ }
+ report_production(beam, force_report);
+ if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ }
+ report_consumption(beam, 0);
+ leave_yellow(beam, &bl);
+ }
+ return status;
+}
+
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_beam_lock bl;
+ apr_bucket *bred, *bgreen, *ng;
+ int transferred = 0;
+ apr_status_t status = APR_SUCCESS;
+ apr_off_t remain = readbytes;
+
+ /* Called from the green thread to take buckets from the beam */
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+transfer:
+ if (beam->aborted) {
+ if (beam->green && !APR_BRIGADE_EMPTY(beam->green)) {
+ apr_brigade_cleanup(beam->green);
+ }
+ status = APR_ECONNABORTED;
+ goto leave;
+ }
+
+ /* transfer enough buckets from our green brigade, if we have one */
+ while (beam->green
+ && !APR_BRIGADE_EMPTY(beam->green)
+ && (readbytes <= 0 || remain >= 0)) {
+ bgreen = APR_BRIGADE_FIRST(beam->green);
+ if (readbytes > 0 && bgreen->length > 0 && remain <= 0) {
+ break;
+ }
+ APR_BUCKET_REMOVE(bgreen);
+ APR_BRIGADE_INSERT_TAIL(bb, bgreen);
+ remain -= bgreen->length;
+ ++transferred;
+ }
+
+ /* transfer from our red brigade, transforming red buckets to
+ * green ones until we have enough */
+ while (!H2_BLIST_EMPTY(&beam->red) && (readbytes <= 0 || remain >= 0)) {
+ bred = H2_BLIST_FIRST(&beam->red);
+ bgreen = NULL;
+
+ if (readbytes > 0 && bred->length > 0 && remain <= 0) {
+ break;
+ }
+
+ if (APR_BUCKET_IS_METADATA(bred)) {
+ if (APR_BUCKET_IS_EOS(bred)) {
+ bgreen = apr_bucket_eos_create(bb->bucket_alloc);
+ beam->close_sent = 1;
+ }
+ else if (APR_BUCKET_IS_FLUSH(bred)) {
+ bgreen = apr_bucket_flush_create(bb->bucket_alloc);
+ }
+ else {
+ /* put red into hold, no green sent out */
+ }
+ }
+ else if (APR_BUCKET_IS_FILE(bred)) {
+ /* This is set aside into the target brigade pool so that
+ * any read operation messes with that pool and not
+ * the red one. */
+ apr_bucket_file *f = (apr_bucket_file *)bred->data;
+ apr_file_t *fd = f->fd;
+ int setaside = (f->readpool != bb->p);
+
+ if (setaside) {
+ status = apr_file_setaside(&fd, fd, bb->p);
+ if (status != APR_SUCCESS) {
+ goto leave;
+ }
+ ++beam->files_beamed;
+ }
+ ng = apr_brigade_insert_file(bb, fd, bred->start, bred->length,
+ bb->p);
+#if APR_HAS_MMAP
+ /* disable mmap handling as this leads to segfaults when
+ * the underlying file is changed while memory pointer has
+ * been handed out. See also PR 59348 */
+ apr_bucket_file_enable_mmap(ng, 0);
+#endif
+ remain -= bred->length;
+ ++transferred;
+ APR_BUCKET_REMOVE(bred);
+ H2_BLIST_INSERT_TAIL(&beam->hold, bred);
+ ++transferred;
+ continue;
+ }
+ else {
+ /* create a "green" standin bucket. we took care about the
+ * underlying red bucket and its data when we placed it into
+ * the red brigade.
+ * the beam bucket will notify us on destruction that bred is
+ * no longer needed. */
+ bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc,
+ beam->buckets_sent++);
+ }
+
+ /* Place the red bucket into our hold, to be destroyed when no
+ * green bucket references it any more. */
+ APR_BUCKET_REMOVE(bred);
+ H2_BLIST_INSERT_TAIL(&beam->hold, bred);
+ beam->received_bytes += bred->length;
+ if (bgreen) {
+ APR_BRIGADE_INSERT_TAIL(bb, bgreen);
+ remain -= bgreen->length;
+ ++transferred;
+ }
+ }
+
+ if (readbytes > 0 && remain < 0) {
+ /* too much, put some back */
+ remain = readbytes;
+ for (bgreen = APR_BRIGADE_FIRST(bb);
+ bgreen != APR_BRIGADE_SENTINEL(bb);
+ bgreen = APR_BUCKET_NEXT(bgreen)) {
+ remain -= bgreen->length;
+ if (remain < 0) {
+ apr_bucket_split(bgreen, bgreen->length+remain);
+ beam->green = apr_brigade_split_ex(bb,
+ APR_BUCKET_NEXT(bgreen),
+ beam->green);
+ break;
+ }
+ }
+ }
+
+ if (beam->closed
+ && (!beam->green || APR_BRIGADE_EMPTY(beam->green))
+ && H2_BLIST_EMPTY(&beam->red)) {
+ /* beam is closed and we have nothing more to receive */
+ if (!beam->close_sent) {
+ apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ beam->close_sent = 1;
+ ++transferred;
+ status = APR_SUCCESS;
+ }
+ }
+
+ if (transferred) {
+ status = APR_SUCCESS;
+ }
+ else if (beam->closed) {
+ status = APR_EOF;
+ }
+ else if (block == APR_BLOCK_READ && bl.mutex && beam->m_cond) {
+ status = wait_cond(beam, bl.mutex);
+ if (status != APR_SUCCESS) {
+ goto leave;
+ }
+ goto transfer;
+ }
+ else {
+ status = APR_EAGAIN;
+ }
+leave:
+ leave_yellow(beam, &bl);
+ }
+ return status;
+}
+
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+ h2_beam_io_callback *cb, void *ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->consumed_fn = cb;
+ beam->consumed_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+void h2_beam_on_produced(h2_bucket_beam *beam,
+ h2_beam_io_callback *cb, void *ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->produced_fn = cb;
+ beam->produced_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+void h2_beam_on_file_beam(h2_bucket_beam *beam,
+ h2_beam_can_beam_callback *cb, void *ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->can_beam_fn = cb;
+ beam->can_beam_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ apr_off_t l = 0;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ for (b = H2_BLIST_FIRST(&beam->red);
+ b != H2_BLIST_SENTINEL(&beam->red);
+ b = APR_BUCKET_NEXT(b)) {
+ /* should all have determinate length */
+ l += b->length;
+ }
+ leave_yellow(beam, &bl);
+ }
+ return l;
+}
+
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ apr_off_t l = 0;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ for (b = H2_BLIST_FIRST(&beam->red);
+ b != H2_BLIST_SENTINEL(&beam->red);
+ b = APR_BUCKET_NEXT(b)) {
+ if (APR_BUCKET_IS_FILE(b)) {
+ /* do not count */
+ }
+ else {
+ /* should all have determinate length */
+ l += b->length;
+ }
+ }
+ leave_yellow(beam, &bl);
+ }
+ return l;
+}
+
+int h2_beam_empty(h2_bucket_beam *beam)
+{
+ int empty = 1;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ empty = (H2_BLIST_EMPTY(&beam->red)
+ && (!beam->green || APR_BRIGADE_EMPTY(beam->green)));
+ leave_yellow(beam, &bl);
+ }
+ return empty;
+}
+
+int h2_beam_closed(h2_bucket_beam *beam)
+{
+ return beam->closed;
+}
+
+int h2_beam_was_received(h2_bucket_beam *beam)
+{
+ int happend = 0;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ happend = (beam->received_bytes > 0);
+ leave_yellow(beam, &bl);
+ }
+ return happend;
+}
+
+apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam)
+{
+ apr_size_t n = 0;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ n = beam->files_beamed;
+ leave_yellow(beam, &bl);
+ }
+ return n;
+}
+
diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h
new file mode 100644
index 00000000..5c5d65de
--- /dev/null
+++ b/modules/http2/h2_bucket_beam.h
@@ -0,0 +1,363 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_bucket_beam_h
+#define h2_bucket_beam_h
+
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+
+/*******************************************************************************
+ * apr_bucket list without bells and whistles
+ ******************************************************************************/
+
+/**
+ * h2_blist can hold a list of buckets just like apr_bucket_brigade, but
+ * does not do any allocations or offer related features.
+ */
+typedef struct {
+ APR_RING_HEAD(h2_bucket_list, apr_bucket) list;
+} h2_blist;
+
+#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link);
+#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link)
+#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link)
+#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list)
+#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list)
+#define H2_BLIST_INSERT_HEAD(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_INSERT_TAIL(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_CONCAT(a, b) do { \
+ APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_PREPEND(a, b) do { \
+ APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \
+ } while (0)
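+
+/* Illustrative sketch (not part of the original header): an h2_blist is set
+ * up and iterated much like an apr_bucket_brigade, just without a pool or
+ * allocator of its own; "some_bucket" below is a placeholder:
+ *
+ *     h2_blist bl;
+ *     apr_bucket *b;
+ *
+ *     H2_BLIST_INIT(&bl);
+ *     H2_BLIST_INSERT_TAIL(&bl, some_bucket);
+ *     for (b = H2_BLIST_FIRST(&bl);
+ *          b != H2_BLIST_SENTINEL(&bl);
+ *          b = APR_BUCKET_NEXT(b)) {
+ *         ... inspect b->length, b->type etc. ...
+ *     }
+ */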
+
+/**
+ * Print the buckets in the list into the buffer (type and lengths).
+ * @param buffer the buffer to print into
+ * @param bmax max number of characters to place in buffer, incl. trailing 0
+ * @param tag tag string for this bucket list
+ * @param sep separator to use
+ * @param bl the bucket list to print
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ h2_blist *bl);
+
+/*******************************************************************************
+ * h2_bucket_beam
+ ******************************************************************************/
+
+/**
+ * A h2_bucket_beam solves the task of transferring buckets, esp. their data,
+ * across threads with zero buffer copies.
+ *
+ * When a thread, let's call it the red thread, wants to send buckets to
+ * another, the green thread, it creates an h2_bucket_beam and adds buckets
+ * via h2_beam_send(). It gives the beam to the green thread, which then
+ * can receive buckets into its own brigade via h2_beam_receive().
+ *
+ * Sending and receiving can happen concurrently, if a thread mutex is set
+ * for the beam, see h2_beam_mutex_set.
+ *
+ * The beam can limit the amount of data it accepts via the buffer_size. This
+ * can also be adjusted during its lifetime. When the beam not only gets a
+ * mutex but also a condition variable (in h2_beam_mutex_set()), sends and
+ * receives can block. A timeout can be set for such blocking calls.
+ *
+ * Care needs to be taken when terminating the beam. The beam registers at
+ * the pool it was created with and will clean up after itself. However, if
+ * received buckets still exist, already freed memory might be accessed.
+ * The beam does an AP_DEBUG_ASSERT on this condition.
+ *
+ * The proper way of shutting down a beam is to first make sure there are no
+ * more green buckets out there, then clean up the beam to purge any
+ * still existing red buckets and then, possibly, terminate the beam itself
+ * (or the pool it was created with).
+ *
+ * The following restrictions apply to bucket transport:
+ * - only EOS and FLUSH meta buckets are copied through. All other meta buckets
+ * are kept in the beam's hold.
+ * - all kinds of data buckets are transported through:
+ * - transient buckets are converted to heap ones on send
+ * - heap and pool buckets require no extra handling
+ * - buckets with indeterminate length are read on send
+ * - file buckets will transfer the file itself into a new bucket, if allowed
+ * - all other buckets are read on send to make sure data is present
+ *
+ * This assures that when the red thread sends its red buckets, the data
+ * is made accessible while still on the red side. The red bucket then enters
+ * the beam's hold storage.
+ * When the green thread calls receive, red buckets in the hold are wrapped
+ * into special beam buckets. Beam buckets on read present the data directly
+ * from the internal red one, but otherwise live on the green side. When a
+ * beam bucket gets destroyed, it notifies its beam that the corresponding
+ * red bucket from the hold may be destroyed.
+ * Since the destruction of green buckets happens in the green thread, any
+ * corresponding red bucket cannot immediately be destroyed, as that would
+ * result in race conditions.
+ * Instead, the beam transfers such red buckets from the hold to the purge
+ * storage. Next time there is a call from the red side, the buckets in
+ * purge will be deleted.
+ *
+ * There are callbacks that can be registered with a beam:
+ * - a "consumed" callback that gets called on the red side with the
+ * amount of data that has been received by the green side. The amount
+ * is a delta from the last callback invocation. The red side can trigger
+ * these callbacks by calling h2_beam_send() with a NULL brigade.
+ * - a "can_beam_file" callback that can prohibit the transfer of file handles
+ * through the beam. This will cause file buckets to be read on send and
+ * their data will then be transported just like a heap bucket's data.
+ * When no callback is registered, no restrictions apply and all files are
+ * passed through.
+ * File handles transferred to the green side will stay there until the
+ * receiving brigade's pool is destroyed/cleared. If the pool lives very
+ * long or if many different files are beamed, the process might run out
+ * of available file handles.
+ *
+ * The name "beam" of course is inspired by good old transporter
+ * technology where humans are kept inside the transporter's memory
+ * buffers until the transmission is complete. Star gates use a similar trick.
+ */
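+
+/* Illustrative sketch (not part of the original header) of the flow described
+ * above; stream_id, the pools, the brigades and the buffer size are
+ * placeholders and error handling is omitted.
+ *
+ * Red side, creating the beam and handing buckets over:
+ *
+ *     h2_bucket_beam *beam;
+ *     h2_beam_create(&beam, red_pool, stream_id, "output", 64 * 1024);
+ *     h2_beam_send(beam, red_brigade, APR_BLOCK_READ);
+ *     h2_beam_close(beam);     (or send an EOS bucket instead)
+ *
+ * Green side, pulling the buckets into its own brigade:
+ *
+ *     h2_beam_receive(beam, green_brigade, APR_NONBLOCK_READ, 0);
+ *
+ * For truly concurrent sends and receives, a mutex enter function (and
+ * optionally a condition variable) has to be registered first via
+ * h2_beam_mutex_set().
+ */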
+
+typedef void h2_beam_mutex_leave(void *ctx, struct apr_thread_mutex_t *lock);
+
+typedef struct {
+ apr_thread_mutex_t *mutex;
+ h2_beam_mutex_leave *leave;
+ void *leave_ctx;
+} h2_beam_lock;
+
+typedef struct h2_bucket_beam h2_bucket_beam;
+
+typedef apr_status_t h2_beam_mutex_enter(void *ctx, h2_beam_lock *pbl);
+
+typedef void h2_beam_io_callback(void *ctx, h2_bucket_beam *beam,
+ apr_off_t bytes);
+
+typedef struct h2_beam_proxy h2_beam_proxy;
+typedef struct {
+ APR_RING_HEAD(h2_beam_proxy_list, h2_beam_proxy) list;
+} h2_bproxy_list;
+
+typedef int h2_beam_can_beam_callback(void *ctx, h2_bucket_beam *beam,
+ apr_file_t *file);
+
+struct h2_bucket_beam {
+ int id;
+ const char *tag;
+ h2_blist red;
+ h2_blist hold;
+ h2_blist purge;
+ apr_bucket_brigade *green;
+ h2_bproxy_list proxies;
+ apr_pool_t *red_pool;
+
+ apr_size_t max_buf_size;
+ apr_interval_time_t timeout;
+
+ apr_off_t sent_bytes; /* amount of bytes sent */
+ apr_off_t received_bytes; /* amount of bytes received */
+
+ apr_size_t buckets_sent; /* # of beam buckets sent */
+ apr_size_t files_beamed; /* how many file handles have been set aside */
+ apr_file_t *last_beamed; /* last file beamed */
+
+ unsigned int aborted : 1;
+ unsigned int closed : 1;
+ unsigned int close_sent : 1;
+
+ void *m_ctx;
+ h2_beam_mutex_enter *m_enter;
+ struct apr_thread_cond_t *m_cond;
+
+ apr_off_t reported_consumed_bytes; /* amount of bytes reported as consumed */
+ h2_beam_io_callback *consumed_fn;
+ void *consumed_ctx;
+ apr_off_t reported_produced_bytes; /* amount of bytes reported as produced */
+ h2_beam_io_callback *produced_fn;
+ void *produced_ctx;
+ h2_beam_can_beam_callback *can_beam_fn;
+ void *can_beam_ctx;
+};
+
+/**
+ * Creates a new bucket beam for transfer of buckets across threads.
+ *
+ * The pool the beam is created with will be used from multiple threads and
+ * therefore has to be protected by the mutex registered via
+ * h2_beam_mutex_set(). It needs a pool allocator that is only used while
+ * holding that same mutex.
+ *
+ * @param pbeam will hold the created beam on return
+ * @param red_pool pool usable on red side, beam lifeline
+ * @param id identifier of the beam, e.g. for logging
+ * @param tag descriptive tag of the beam, e.g. for logging
+ * @param buffer_size maximum memory footprint of buckets buffered in beam, or
+ * 0 for no limitation
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam,
+ apr_pool_t *red_pool,
+ int id, const char *tag,
+ apr_size_t buffer_size);
+
+/**
+ * Destroys the beam immediately without cleanup.
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam);
+
+/**
+ * Send buckets from the given brigade through the beam. Will hold buckets
+ * internally as long as they have not been processed by the receiving side.
+ * All accepted buckets are removed from the given brigade. Will return with
+ * APR_EAGAIN on non-blocking sends when not all buckets could be accepted.
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_send(h2_bucket_beam *beam,
+ apr_bucket_brigade *red_buckets,
+ apr_read_type_e block);
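+
+/* Illustrative sketch (not part of the original header): a non-blocking send
+ * where buckets that were not accepted simply stay in the brigade for a
+ * later retry:
+ *
+ *     apr_status_t rv = h2_beam_send(beam, red_brigade, APR_NONBLOCK_READ);
+ *     if (rv == APR_EAGAIN) {
+ *         (beam buffer is full; remaining buckets are still in red_brigade)
+ *     }
+ */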
+
+/**
+ * Receive buckets from the beam into the given brigade. Will return APR_EOF
+ * when reading past an EOS bucket. Reads can be blocking until data is
+ * available or the beam has been closed. Non-blocking calls return APR_EAGAIN
+ * if no data is available.
+ *
+ * Call from the green side only.
+ */
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+ apr_bucket_brigade *green_buckets,
+ apr_read_type_e block,
+ apr_off_t readbytes);
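+
+/* Illustrative sketch (not part of the original header): a green side that
+ * blocks until buckets arrive and stops once the beam is closed and drained;
+ * this assumes a mutex and condition variable have been registered:
+ *
+ *     apr_status_t rv;
+ *     do {
+ *         rv = h2_beam_receive(beam, green_brigade, APR_BLOCK_READ, 0);
+ *         (process and empty green_brigade here)
+ *     } while (rv == APR_SUCCESS);
+ *     (rv is APR_EOF once everything, including EOS, has been received)
+ */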
+
+/**
+ * Determine if beam is closed. May still contain buffered data.
+ *
+ * Call from red or green side.
+ */
+int h2_beam_closed(h2_bucket_beam *beam);
+
+/**
+ * Determine if beam is empty.
+ *
+ * Call from red or green side.
+ */
+int h2_beam_empty(h2_bucket_beam *beam);
+
+/**
+ * Abort the beam. Will cleanup any buffered buckets and answer all send
+ * and receives with APR_ECONNABORTED.
+ *
+ * Call from the red side only.
+ */
+void h2_beam_abort(h2_bucket_beam *beam);
+
+/**
+ * Close the beam. Sending an EOS bucket serves the same purpose.
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_close(h2_bucket_beam *beam);
+
+/**
+ * Return APR_SUCCESS when all buckets in transit have been handled.
+ * When called with APR_BLOCK_READ and a mutex set, will wait until the green
+ * side has consumed all data. Otherwise APR_EAGAIN is returned.
+ * With clear_buffers set, any queued data is discarded.
+ * If a timeout is set on the beam, waiting might also time out and
+ * return APR_TIMEUP.
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_shutdown(h2_bucket_beam *beam, apr_read_type_e block,
+ int clear_buffers);
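+
+/* A minimal teardown sketch (illustrative only), following the order given in
+ * the overview at the top of this file: wait for all green buckets to be
+ * released, discard anything still queued, then get rid of the beam:
+ *
+ *     if (h2_beam_shutdown(beam, APR_BLOCK_READ, 1) == APR_SUCCESS) {
+ *         h2_beam_destroy(beam);
+ *     }
+ */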
+
+void h2_beam_mutex_set(h2_bucket_beam *beam,
+ h2_beam_mutex_enter m_enter,
+ struct apr_thread_cond_t *cond,
+ void *m_ctx);
+
+/**
+ * Set/get the timeout for blocking read/write operations. Only works
+ * if a mutex has been set for the beam.
+ */
+void h2_beam_timeout_set(h2_bucket_beam *beam,
+ apr_interval_time_t timeout);
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam);
+
+/**
+ * Set/get the maximum buffer size for beam data (memory footprint).
+ */
+void h2_beam_buffer_size_set(h2_bucket_beam *beam,
+ apr_size_t buffer_size);
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam);
+
+/**
+ * Register a callback to be invoked on the red side with the
+ * amount of bytes that have been consumed by the green side, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param cb the callback or NULL
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the red side, callbacks invoked on red side.
+ */
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+ h2_beam_io_callback *cb, void *ctx);
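+
+/* Illustrative sketch (not part of the original header): a consumed callback
+ * that just accumulates the reported byte counts; names are placeholders:
+ *
+ *     static void on_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+ *     {
+ *         apr_off_t *total = ctx;
+ *         *total += bytes;
+ *     }
+ *
+ *     h2_beam_on_consumed(beam, on_consumed, &total_consumed);
+ */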
+
+/**
+ * Register a callback to be invoked on the red side with the
+ * amount of bytes that have been produced by the red side, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param cb the callback or NULL
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the red side, callbacks invoked on red side.
+ */
+void h2_beam_on_produced(h2_bucket_beam *beam,
+ h2_beam_io_callback *cb, void *ctx);
+
+void h2_beam_on_file_beam(h2_bucket_beam *beam,
+ h2_beam_can_beam_callback *cb, void *ctx);
+
+/**
+ * Get the amount of bytes currently buffered in the beam (unread).
+ */
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam);
+
+/**
+ * Get the memory used by the buffered buckets, approximately.
+ */
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam);
+
+/**
+ * Return != 0 iff (some) data from the beam has been received.
+ */
+int h2_beam_was_received(h2_bucket_beam *beam);
+
+apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam);
+
+#endif /* h2_bucket_beam_h */
diff --git a/modules/http2/h2_bucket_eoc.c b/modules/http2/h2_bucket_eoc.c
index 3ddb54d6..33144ef5 100644
--- a/modules/http2/h2_bucket_eoc.c
+++ b/modules/http2/h2_bucket_eoc.c
@@ -23,6 +23,7 @@
#include <http_log.h>
#include "h2_private.h"
+#include "h2.h"
#include "h2_mplx.h"
#include "h2_session.h"
#include "h2_bucket_eoc.h"
diff --git a/modules/http2/h2_bucket_eos.c b/modules/http2/h2_bucket_eos.c
index 98a0b365..28c34fdc 100644
--- a/modules/http2/h2_bucket_eos.c
+++ b/modules/http2/h2_bucket_eos.c
@@ -23,6 +23,7 @@
#include <http_log.h>
#include "h2_private.h"
+#include "h2.h"
#include "h2_mplx.h"
#include "h2_stream.h"
#include "h2_bucket_eos.h"
@@ -37,10 +38,8 @@ static apr_status_t bucket_cleanup(void *data)
h2_stream **pstream = data;
if (*pstream) {
- /*
- * If bucket_destroy is called after us, this prevents
- * bucket_destroy from trying to destroy the pool again.
- */
+ /* If bucket_destroy is called after us, this prevents
+ * bucket_destroy from trying to destroy the stream again. */
*pstream = NULL;
}
return APR_SUCCESS;
@@ -91,10 +90,13 @@ static void bucket_destroy(void *data)
if (apr_bucket_shared_destroy(h)) {
h2_stream *stream = h->stream;
- if (stream) {
- h2_stream_cleanup(stream);
+ if (stream && stream->pool) {
+ apr_pool_cleanup_kill(stream->pool, &h->stream, bucket_cleanup);
}
apr_bucket_free(h);
+ if (stream) {
+ h2_stream_eos_destroy(stream);
+ }
}
}
diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
index 3b28c1f9..4ddf1b70 100644
--- a/modules/http2/h2_conn.c
+++ b/modules/http2/h2_conn.c
@@ -26,6 +26,7 @@
#include <http_request.h>
#include "h2_private.h"
+#include "h2.h"
#include "h2_config.h"
#include "h2_ctx.h"
#include "h2_filter.h"
diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c
index 59561ecd..df4aec14 100644
--- a/modules/http2/h2_conn_io.c
+++ b/modules/http2/h2_conn_io.c
@@ -45,7 +45,6 @@
* which seems to create less TCP packets overall
*/
#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100)
-#define WRITE_BUFFER_SIZE (5*WRITE_SIZE_MAX)
static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
@@ -127,22 +126,13 @@ static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
}
apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
- const h2_config *cfg,
- apr_pool_t *pool)
+ const h2_config *cfg)
{
io->c = c;
- io->output = apr_brigade_create(pool, c->bucket_alloc);
- io->buflen = 0;
+ io->output = apr_brigade_create(c->pool, c->bucket_alloc);
io->is_tls = h2_h2_is_tls(c);
io->buffer_output = io->is_tls;
-
- if (io->buffer_output) {
- io->bufsize = WRITE_BUFFER_SIZE;
- io->buffer = apr_pcalloc(pool, io->bufsize);
- }
- else {
- io->bufsize = 0;
- }
+ io->pass_threshold = h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM) / 2;
if (io->is_tls) {
/* This is what we start with,
@@ -151,12 +141,13 @@ apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE);
io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS)
* APR_USEC_PER_SEC);
- io->write_size = WRITE_SIZE_INITIAL;
+ io->write_size = (io->cooldown_usecs > 0?
+ WRITE_SIZE_INITIAL : WRITE_SIZE_MAX);
}
else {
io->warmup_size = 0;
io->cooldown_usecs = 0;
- io->write_size = io->bufsize;
+ io->write_size = 0;
}
if (APLOGctrace1(c)) {
@@ -170,54 +161,94 @@ apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
return APR_SUCCESS;
}
-int h2_conn_io_is_buffered(h2_conn_io *io)
+#define LOG_SCRATCH 0
+
+static void append_scratch(h2_conn_io *io)
{
- return io->bufsize > 0;
+ if (io->scratch && io->slen > 0) {
+ apr_bucket *b = apr_bucket_heap_create(io->scratch, io->slen,
+ apr_bucket_free,
+ io->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03386)
+ "h2_conn_io(%ld): append_scratch(%ld)",
+ io->c->id, (long)io->slen);
+#endif
+ io->scratch = NULL;
+ io->slen = io->ssize = 0;
+ }
}
-typedef struct {
- conn_rec *c;
- h2_conn_io *io;
-} pass_out_ctx;
-
-static apr_status_t pass_out(apr_bucket_brigade *bb, void *ctx)
+static apr_size_t assure_scratch_space(h2_conn_io *io)
+{
+ apr_size_t remain = io->ssize - io->slen;
+ if (io->scratch && remain == 0) {
+ append_scratch(io);
+ }
+ if (!io->scratch) {
+ /* we control the size and it is larger than what buckets usually
+ * allocate. */
+ io->scratch = apr_bucket_alloc(io->write_size, io->c->bucket_alloc);
+ io->ssize = io->write_size;
+ io->slen = 0;
+ remain = io->ssize;
+ }
+ return remain;
+}
+
+static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b)
{
- pass_out_ctx *pctx = ctx;
- conn_rec *c = pctx->c;
apr_status_t status;
- apr_off_t bblen;
+ const char *data;
+ apr_size_t len;
- if (APR_BRIGADE_EMPTY(bb)) {
+ if (!b->length) {
return APR_SUCCESS;
}
- ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_WRITE, c);
- apr_brigade_length(bb, 0, &bblen);
- h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", bb);
- status = ap_pass_brigade(c->output_filters, bb);
- if (status == APR_SUCCESS && pctx->io) {
- pctx->io->bytes_written += (apr_size_t)bblen;
- pctx->io->last_write = apr_time_now();
+ AP_DEBUG_ASSERT(b->length <= (io->ssize - io->slen));
+ if (APR_BUCKET_IS_FILE(b)) {
+ apr_bucket_file *f = (apr_bucket_file *)b->data;
+ apr_file_t *fd = f->fd;
+ apr_off_t offset = b->start;
+ apr_size_t len = b->length;
+
+ /* file buckets will either mmap (which we do not want) or
+ * read 8000 byte chunks and split themselves. However, we do
+ * know *exactly* how many bytes we need where.
+ */
+ status = apr_file_seek(fd, APR_SET, &offset);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ status = apr_file_read(fd, io->scratch + io->slen, &len);
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, io->c, APLOGNO(03387)
+ "h2_conn_io(%ld): FILE_to_scratch(%ld)",
+ io->c->id, (long)len);
+#endif
+ if (status != APR_SUCCESS && status != APR_EOF) {
+ return status;
+ }
+ io->slen += len;
}
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044)
- "h2_conn_io(%ld): pass_out brigade %ld bytes",
- c->id, (long)bblen);
+ else {
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03388)
+ "h2_conn_io(%ld): read_to_scratch(%ld)",
+ io->c->id, (long)b->length);
+#endif
+ memcpy(io->scratch+io->slen, data, len);
+ io->slen += len;
+ }
}
- apr_brigade_cleanup(bb);
return status;
}
-/* Bring the current buffer content into the output brigade, appropriately
- * chunked.
- */
-static apr_status_t bucketeer_buffer(h2_conn_io *io)
+static void check_write_size(h2_conn_io *io)
{
- const char *data = io->buffer;
- apr_size_t remaining = io->buflen;
- apr_bucket *b;
- int bcount, i;
-
if (io->write_size > WRITE_SIZE_INITIAL
&& (io->cooldown_usecs > 0)
&& (apr_time_now() - io->last_write) >= io->cooldown_usecs) {
@@ -236,130 +267,156 @@ static apr_status_t bucketeer_buffer(h2_conn_io *io)
"h2_conn_io(%ld): threshold reached, write size now %ld",
(long)io->c->id, (long)io->write_size);
}
-
- bcount = (int)(remaining / io->write_size);
- for (i = 0; i < bcount; ++i) {
- b = apr_bucket_transient_create(data, io->write_size,
- io->output->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(io->output, b);
- data += io->write_size;
- remaining -= io->write_size;
- }
-
- if (remaining > 0) {
- b = apr_bucket_transient_create(data, remaining,
- io->output->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(io->output, b);
- }
- return APR_SUCCESS;
-}
-
-apr_status_t h2_conn_io_writeb(h2_conn_io *io, apr_bucket *b)
-{
- APR_BRIGADE_INSERT_TAIL(io->output, b);
- return APR_SUCCESS;
}
-static apr_status_t h2_conn_io_flush_int(h2_conn_io *io, int flush, int eoc)
+static apr_status_t pass_output(h2_conn_io *io, int flush, int eoc)
{
- pass_out_ctx ctx;
+ conn_rec *c = io->c;
apr_bucket *b;
+ apr_off_t bblen;
+ apr_status_t status;
- if (io->buflen == 0 && APR_BRIGADE_EMPTY(io->output)) {
- return APR_SUCCESS;
- }
-
- if (io->buflen > 0) {
- /* something in the buffer, put it in the output brigade */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
- "h2_conn_io: flush, flushing %ld bytes",
- (long)io->buflen);
- bucketeer_buffer(io);
- }
-
+ append_scratch(io);
if (flush) {
- b = apr_bucket_flush_create(io->c->bucket_alloc);
+ b = apr_bucket_flush_create(c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(io->output, b);
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, "h2_conn_io: flush");
- io->buflen = 0;
- ctx.c = io->c;
- ctx.io = eoc? NULL : io;
+ if (APR_BRIGADE_EMPTY(io->output)) {
+ return APR_SUCCESS;
+ }
- return pass_out(io->output, &ctx);
- /* no more access after this, as we might have flushed an EOC bucket
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, "h2_conn_io: pass_output");
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL);
+ apr_brigade_length(io->output, 0, &bblen);
+
+ h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", io->output);
+ status = ap_pass_brigade(c->output_filters, io->output);
+
+ /* careful with access after this, as we might have flushed an EOC bucket
* that de-allocated us all. */
+ if (!eoc) {
+ apr_brigade_cleanup(io->output);
+ if (status == APR_SUCCESS) {
+ io->bytes_written += (apr_size_t)bblen;
+ io->last_write = apr_time_now();
+ }
+ }
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044)
+ "h2_conn_io(%ld): pass_out brigade %ld bytes",
+ c->id, (long)bblen);
+ }
+ return status;
}
apr_status_t h2_conn_io_flush(h2_conn_io *io)
{
- return h2_conn_io_flush_int(io, 1, 0);
-}
-
-apr_status_t h2_conn_io_consider_pass(h2_conn_io *io)
-{
- apr_off_t len = 0;
-
- if (!APR_BRIGADE_EMPTY(io->output)) {
- len = h2_brigade_mem_size(io->output);
- }
- len += io->buflen;
- if (len >= WRITE_BUFFER_SIZE) {
- return h2_conn_io_flush_int(io, 1, 0);
- }
- return APR_SUCCESS;
+ return pass_output(io, 1, 0);
}
apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, h2_session *session)
{
apr_bucket *b = h2_bucket_eoc_create(io->c->bucket_alloc, session);
APR_BRIGADE_INSERT_TAIL(io->output, b);
- return h2_conn_io_flush_int(io, 0, 1);
+ return pass_output(io, 1, 1);
}
-apr_status_t h2_conn_io_write(h2_conn_io *io,
- const char *buf, size_t length)
+apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length)
{
apr_status_t status = APR_SUCCESS;
- pass_out_ctx ctx;
+ apr_size_t remain;
- ctx.c = io->c;
- ctx.io = io;
- if (io->bufsize > 0) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
- "h2_conn_io: buffering %ld bytes", (long)length);
-
- if (!APR_BRIGADE_EMPTY(io->output)) {
- status = h2_conn_io_flush_int(io, 0, 0);
- }
-
- while (length > 0 && (status == APR_SUCCESS)) {
- apr_size_t avail = io->bufsize - io->buflen;
- if (avail <= 0) {
- status = h2_conn_io_flush_int(io, 0, 0);
- }
- else if (length > avail) {
- memcpy(io->buffer + io->buflen, buf, avail);
- io->buflen += avail;
- length -= avail;
- buf += avail;
+ if (io->buffer_output) {
+ while (length > 0) {
+ remain = assure_scratch_space(io);
+ if (remain >= length) {
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03389)
+ "h2_conn_io(%ld): write_to_scratch(%ld)",
+ io->c->id, (long)length);
+#endif
+ memcpy(io->scratch + io->slen, data, length);
+ io->slen += length;
+ length = 0;
}
else {
- memcpy(io->buffer + io->buflen, buf, length);
- io->buflen += length;
- length = 0;
- break;
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03390)
+ "h2_conn_io(%ld): write_to_scratch(%ld)",
+ io->c->id, (long)remain);
+#endif
+ memcpy(io->scratch + io->slen, data, remain);
+ io->slen += remain;
+ data += remain;
+ length -= remain;
}
}
-
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, status, io->c,
- "h2_conn_io: writing %ld bytes to brigade", (long)length);
- status = apr_brigade_write(io->output, pass_out, &ctx, buf, length);
+ status = apr_brigade_write(io->output, NULL, NULL, data, length);
}
+ return status;
+}
+
+apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ apr_status_t status = APR_SUCCESS;
+ check_write_size(io);
+ while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+ b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* need to finish any open scratch bucket, as meta data
+ * needs to be forward "in order". */
+ append_scratch(io);
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+ else if (io->buffer_output) {
+ apr_size_t remain = assure_scratch_space(io);
+ if (b->length > remain) {
+ apr_bucket_split(b, remain);
+ if (io->slen == 0) {
+ /* complete write_size bucket, append unchanged */
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03391)
+ "h2_conn_io(%ld): pass bucket(%ld)",
+ io->c->id, (long)b->length);
+#endif
+ continue;
+ }
+ }
+ else {
+ /* bucket fits in remain, copy to scratch */
+ status = read_to_scratch(io, b);
+ apr_bucket_delete(b);
+ continue;
+ }
+ }
+ else {
+ /* no buffering: set transient buckets aside, then forward everything */
+ if (APR_BUCKET_IS_TRANSIENT(b)) {
+ apr_bucket_setaside(b, io->c->pool);
+ }
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+ }
+
+ if (status == APR_SUCCESS) {
+ if (!APR_BRIGADE_EMPTY(io->output)) {
+ apr_off_t len = h2_brigade_mem_size(io->output);
+ if (len >= io->pass_threshold) {
+ return pass_output(io, 0, 0);
+ }
+ }
+ }
return status;
}
diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_conn_io.h
index b8be671d..4ccf0070 100644
--- a/modules/http2/h2_conn_io.h
+++ b/modules/http2/h2_conn_io.h
@@ -39,16 +39,15 @@ typedef struct {
apr_int64_t bytes_written;
int buffer_output;
- char *buffer;
- apr_size_t buflen;
- apr_size_t bufsize;
+ apr_size_t pass_threshold;
+
+ char *scratch;
+ apr_size_t ssize;
+ apr_size_t slen;
} h2_conn_io;
apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
- const struct h2_config *cfg,
- apr_pool_t *pool);
-
-int h2_conn_io_is_buffered(h2_conn_io *io);
+ const struct h2_config *cfg);
/**
* Append data to the buffered output.
@@ -59,12 +58,7 @@ apr_status_t h2_conn_io_write(h2_conn_io *io,
const char *buf,
size_t length);
-/**
- * Append a bucket to the buffered output.
- * @param io the connection io
- * @param b the bucket to append
- */
-apr_status_t h2_conn_io_writeb(h2_conn_io *io, apr_bucket *b);
+apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb);
/**
* Append an End-Of-Connection bucket to the output that, once destroyed,
@@ -79,11 +73,4 @@ apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, struct h2_session *session);
*/
apr_status_t h2_conn_io_flush(h2_conn_io *io);
-/**
- * Check the amount of buffered output and pass it on if enough has accumulated.
- * @param io the connection io
- * @param flush if a flush bucket should be appended to any output
- */
-apr_status_t h2_conn_io_consider_pass(h2_conn_io *io);
-
#endif /* defined(__mod_h2__h2_conn_io__) */
diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c
index 8b786b94..4b596a3d 100644
--- a/modules/http2/h2_ctx.c
+++ b/modules/http2/h2_ctx.c
@@ -23,7 +23,6 @@
#include "h2_session.h"
#include "h2_task.h"
#include "h2_ctx.h"
-#include "h2_private.h"
static h2_ctx *h2_ctx_create(const conn_rec *c)
{
diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c
index 8bf7fbcb..33189de0 100644
--- a/modules/http2/h2_filter.c
+++ b/modules/http2/h2_filter.c
@@ -22,6 +22,7 @@
#include <scoreboard.h>
#include "h2_private.h"
+#include "h2.h"
#include "h2_conn_io.h"
#include "h2_ctx.h"
#include "h2_mplx.h"
@@ -208,18 +209,27 @@ static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...)
return rv;
}
-static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb)
+static apr_status_t h2_status_stream_filter(h2_stream *stream)
{
- h2_stream *stream = sos->stream;
h2_session *session = stream->session;
h2_mplx *mplx = session->mplx;
+ conn_rec *c = session->c;
h2_push_diary *diary;
+ apr_bucket_brigade *bb;
apr_status_t status;
- if (!bb) {
- bb = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+ if (!stream->response) {
+ return APR_EINVAL;
}
+ if (!stream->buffer) {
+ stream->buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
+ }
+ bb = stream->buffer;
+
+ apr_table_unset(stream->response->headers, "Content-Length");
+ stream->response->content_length = -1;
+
bbout(bb, "{\n");
bbout(bb, " \"HTTP2\": \"on\",\n");
bbout(bb, " \"H2PUSH\": \"%s\",\n", h2_session_push_enabled(session)? "on" : "off");
@@ -266,57 +276,15 @@ static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb)
bbout(bb, " \"bytes_sent\": %"APR_UINT64_T_FMT"\n", session->io.bytes_written);
bbout(bb, "}\n");
- return sos->prev->buffer(sos->prev, bb);
-}
-
-static apr_status_t h2_sos_h2_status_read_to(h2_sos *sos, apr_bucket_brigade *bb,
- apr_off_t *plen, int *peos)
-{
- return sos->prev->read_to(sos->prev, bb, plen, peos);
-}
-
-static apr_status_t h2_sos_h2_status_prepare(h2_sos *sos, apr_off_t *plen, int *peos)
-{
- return sos->prev->prepare(sos->prev, plen, peos);
-}
-
-static apr_status_t h2_sos_h2_status_readx(h2_sos *sos, h2_io_data_cb *cb, void *ctx,
- apr_off_t *plen, int *peos)
-{
- return sos->prev->readx(sos->prev, cb, ctx, plen, peos);
-}
-
-static apr_table_t *h2_sos_h2_status_get_trailers(h2_sos *sos)
-{
- return sos->prev->get_trailers(sos->prev);
-}
-
-static h2_sos *h2_sos_h2_status_create(h2_sos *prev)
-{
- h2_sos *sos;
- h2_response *response = prev->response;
-
- apr_table_unset(response->headers, "Content-Length");
- response->content_length = -1;
-
- sos = apr_pcalloc(prev->stream->pool, sizeof(*sos));
- sos->prev = prev;
- sos->response = response;
- sos->stream = prev->stream;
- sos->buffer = h2_sos_h2_status_buffer;
- sos->prepare = h2_sos_h2_status_prepare;
- sos->readx = h2_sos_h2_status_readx;
- sos->read_to = h2_sos_h2_status_read_to;
- sos->get_trailers = h2_sos_h2_status_get_trailers;
-
- return sos;
+ return APR_SUCCESS;
}
-h2_sos *h2_filter_sos_create(const char *name, struct h2_sos *prev)
+apr_status_t h2_stream_filter(h2_stream *stream)
{
- if (!strcmp(H2_SOS_H2_STATUS, name)) {
- return h2_sos_h2_status_create(prev);
+ const char *fname = stream->response? stream->response->sos_filter : NULL;
+ if (fname && !strcmp(H2_SOS_H2_STATUS, fname)) {
+ return h2_status_stream_filter(stream);
}
- return prev;
+ return APR_SUCCESS;
}
diff --git a/modules/http2/h2_filter.h b/modules/http2/h2_filter.h
index 2f281f8b..5ba7d158 100644
--- a/modules/http2/h2_filter.h
+++ b/modules/http2/h2_filter.h
@@ -43,35 +43,9 @@ apr_status_t h2_filter_core_input(ap_filter_t* filter,
apr_read_type_e block,
apr_off_t readbytes);
-typedef struct h2_sos h2_sos;
-typedef apr_status_t h2_sos_data_cb(void *ctx, const char *data, apr_off_t len);
-
-typedef apr_status_t h2_sos_buffer(h2_sos *sos, apr_bucket_brigade *bb);
-typedef apr_status_t h2_sos_prepare(h2_sos *sos, apr_off_t *plen, int *peos);
-typedef apr_status_t h2_sos_readx(h2_sos *sos, h2_sos_data_cb *cb,
- void *ctx, apr_off_t *plen, int *peos);
-typedef apr_status_t h2_sos_read_to(h2_sos *sos, apr_bucket_brigade *bb,
- apr_off_t *plen, int *peos);
-typedef apr_table_t *h2_sos_get_trailers(h2_sos *sos);
-
-
#define H2_RESP_SOS_NOTE "h2-sos-filter"
-struct h2_sos {
- struct h2_stream *stream;
- h2_sos *prev;
- struct h2_response *response;
- void *ctx;
- h2_sos_buffer *buffer;
- h2_sos_prepare *prepare;
- h2_sos_readx *readx;
- h2_sos_read_to *read_to;
- h2_sos_get_trailers *get_trailers;
-};
-
-h2_sos *h2_filter_sos_create(const char *name, struct h2_sos *prev);
-
+apr_status_t h2_stream_filter(struct h2_stream *stream);
int h2_filter_h2_status_handler(request_rec *r);
-
#endif /* __mod_h2__h2_filter__ */
diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c
index 8e1f163a..0f893ec1 100644
--- a/modules/http2/h2_from_h1.c
+++ b/modules/http2/h2_from_h1.c
@@ -31,7 +31,6 @@
#include "h2_response.h"
#include "h2_from_h1.h"
#include "h2_task.h"
-#include "h2_task_output.h"
#include "h2_util.h"
@@ -292,7 +291,8 @@ static void fix_vary(request_rec *r)
}
}
-static void set_basic_http_header(request_rec *r, apr_table_t *headers)
+void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool)
{
char *date = NULL;
const char *proxy_date = NULL;
@@ -303,7 +303,7 @@ static void set_basic_http_header(request_rec *r, apr_table_t *headers)
* keep the set-by-proxy server and date headers, otherwise
* generate a new server header / date header
*/
- if (r->proxyreq != PROXYREQ_NONE) {
+ if (r && r->proxyreq != PROXYREQ_NONE) {
proxy_date = apr_table_get(r->headers_out, "Date");
if (!proxy_date) {
/*
@@ -311,25 +311,29 @@ static void set_basic_http_header(request_rec *r, apr_table_t *headers)
* our own Date header and pass it over to proxy_date later to
* avoid a compiler warning.
*/
- date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, r->request_time);
}
server = apr_table_get(r->headers_out, "Server");
}
else {
- date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r? r->request_time : apr_time_now());
}
apr_table_setn(headers, "Date", proxy_date ? proxy_date : date );
- apr_table_unset(r->headers_out, "Date");
+ if (r) {
+ apr_table_unset(r->headers_out, "Date");
+ }
if (!server && *us) {
server = us;
}
if (server) {
apr_table_setn(headers, "Server", server);
- apr_table_unset(r->headers_out, "Server");
+ if (r) {
+ apr_table_unset(r->headers_out, "Server");
+ }
}
}
@@ -446,7 +450,7 @@ static h2_response *create_response(h2_from_h1 *from_h1, request_rec *r)
headers = apr_table_make(r->pool, 10);
- set_basic_http_header(r, headers);
+ h2_from_h1_set_basic_http_header(headers, r, r->pool);
if (r->status == HTTP_NOT_MODIFIED) {
apr_table_do((int (*)(void *, const char *, const char *)) copy_header,
(void *) headers, r->headers_out,
@@ -473,7 +477,7 @@ static h2_response *create_response(h2_from_h1 *from_h1, request_rec *r)
apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
h2_task *task = f->ctx;
- h2_from_h1 *from_h1 = task->output? task->output->from_h1 : NULL;
+ h2_from_h1 *from_h1 = task->output.from_h1;
request_rec *r = f->r;
apr_bucket *b;
ap_bucket_error *eb = NULL;
@@ -483,7 +487,7 @@ apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
"h2_from_h1(%d): output_filter called", from_h1->stream_id);
- if (r->header_only && task->output && from_h1->response) {
+ if (r->header_only && from_h1->response) {
/* throw away any data after we have compiled the response */
apr_brigade_cleanup(bb);
return OK;
@@ -552,7 +556,7 @@ apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
h2_task *task = f->ctx;
- h2_from_h1 *from_h1 = task->output? task->output->from_h1 : NULL;
+ h2_from_h1 *from_h1 = task->output.from_h1;
request_rec *r = f->r;
apr_bucket *b;
diff --git a/modules/http2/h2_from_h1.h b/modules/http2/h2_from_h1.h
index af5dea24..71cc35fa 100644
--- a/modules/http2/h2_from_h1.h
+++ b/modules/http2/h2_from_h1.h
@@ -69,4 +69,7 @@ apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb);
apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb);
+void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool);
+
#endif /* defined(__mod_h2__h2_from_h1__) */
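
With the request_rec now optional, h2_from_h1_set_basic_http_header() can also stamp Date and Server onto responses that the h2 layer generates itself, falling back to apr_time_now() when no request time is available. A rough sketch of that fallback using plain APR calls; set_basic_headers and server_token are illustrative names, not part of the module.

    #include <apr_pools.h>
    #include <apr_tables.h>
    #include <apr_time.h>

    /* Fill in Date and Server for a response header table. A request_time
     * of 0 stands in for the "no request_rec" case above. */
    static void set_basic_headers(apr_table_t *headers, apr_pool_t *pool,
                                  apr_time_t request_time,
                                  const char *server_token)
    {
        char *date = apr_palloc(pool, APR_RFC822_DATE_LEN);

        apr_rfc822_date(date, request_time? request_time : apr_time_now());
        apr_table_setn(headers, "Date", date);
        if (server_token && *server_token) {
            apr_table_setn(headers, "Server", server_token);
        }
    }

On the master connection a caller would pass something like ap_get_server_banner() for server_token.
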
diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c
index 05fb6ef6..825cd77e 100644
--- a/modules/http2/h2_h2.c
+++ b/modules/http2/h2_h2.c
@@ -86,7 +86,7 @@ const char *h2_h2_err_description(unsigned int h2_error)
if (h2_error < (sizeof(h2_err_descr)/sizeof(h2_err_descr[0]))) {
return h2_err_descr[h2_error];
}
- return "unknown http/2 errotr code";
+ return "unknown http/2 error code";
}
/*******************************************************************************
@@ -485,9 +485,9 @@ int h2_is_acceptable_connection(conn_rec *c, int require_all)
if (strncmp("TLS", val, 3)
|| !strcmp("TLSv1", val)
|| !strcmp("TLSv1.1", val)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050)
- "h2_h2(%ld): tls protocol not suitable: %s",
- (long)c->id, val);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050)
+ "h2_h2(%ld): tls protocol not suitable: %s",
+ (long)c->id, val);
return 0;
}
}
@@ -680,7 +680,6 @@ static int h2_h2_post_read_req(request_rec *r)
struct h2_task *task = h2_ctx_get_task(ctx);
/* This hook will get called twice on internal redirects. Take care
* that we manipulate filters only once. */
- /* our slave connection? */
if (task && !task->filters_set) {
ap_filter_t *f;
diff --git a/modules/http2/h2_int_queue.c b/modules/http2/h2_int_queue.c
deleted file mode 100644
index 472ae340..00000000
--- a/modules/http2/h2_int_queue.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stddef.h>
-#include <apr_pools.h>
-
-#include "h2_int_queue.h"
-
-
-static void tq_grow(h2_int_queue *q, int nlen);
-static void tq_swap(h2_int_queue *q, int i, int j);
-static int tq_bubble_up(h2_int_queue *q, int i, int top,
- h2_iq_cmp *cmp, void *ctx);
-static int tq_bubble_down(h2_int_queue *q, int i, int bottom,
- h2_iq_cmp *cmp, void *ctx);
-
-h2_int_queue *h2_iq_create(apr_pool_t *pool, int capacity)
-{
- h2_int_queue *q = apr_pcalloc(pool, sizeof(h2_int_queue));
- if (q) {
- q->pool = pool;
- tq_grow(q, capacity);
- q->nelts = 0;
- }
- return q;
-}
-
-int h2_iq_empty(h2_int_queue *q)
-{
- return q->nelts == 0;
-}
-
-int h2_iq_size(h2_int_queue *q)
-{
- return q->nelts;
-}
-
-
-void h2_iq_add(h2_int_queue *q, int sid, h2_iq_cmp *cmp, void *ctx)
-{
- int i;
-
- if (q->nelts >= q->nalloc) {
- tq_grow(q, q->nalloc * 2);
- }
-
- i = (q->head + q->nelts) % q->nalloc;
- q->elts[i] = sid;
- ++q->nelts;
-
- if (cmp) {
- /* bubble it to the front of the queue */
- tq_bubble_up(q, i, q->head, cmp, ctx);
- }
-}
-
-int h2_iq_remove(h2_int_queue *q, int sid)
-{
- int i;
- for (i = 0; i < q->nelts; ++i) {
- if (sid == q->elts[(q->head + i) % q->nalloc]) {
- break;
- }
- }
-
- if (i < q->nelts) {
- ++i;
- for (; i < q->nelts; ++i) {
- q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
- }
- --q->nelts;
- return 1;
- }
- return 0;
-}
-
-void h2_iq_clear(h2_int_queue *q)
-{
- q->nelts = 0;
-}
-
-void h2_iq_sort(h2_int_queue *q, h2_iq_cmp *cmp, void *ctx)
-{
- /* Assume that changes in ordering are minimal. This needs,
- * best case, q->nelts - 1 comparisions to check that nothing
- * changed.
- */
- if (q->nelts > 0) {
- int i, ni, prev, last;
-
- /* Start at the end of the queue and create a tail of sorted
- * entries. Make that tail one element longer in each iteration.
- */
- last = i = (q->head + q->nelts - 1) % q->nalloc;
- while (i != q->head) {
- prev = (q->nalloc + i - 1) % q->nalloc;
-
- ni = tq_bubble_up(q, i, prev, cmp, ctx);
- if (ni == prev) {
- /* i bubbled one up, bubble the new i down, which
- * keeps all tasks below i sorted. */
- tq_bubble_down(q, i, last, cmp, ctx);
- }
- i = prev;
- };
- }
-}
-
-
-int h2_iq_shift(h2_int_queue *q)
-{
- int sid;
-
- if (q->nelts <= 0) {
- return 0;
- }
-
- sid = q->elts[q->head];
- q->head = (q->head + 1) % q->nalloc;
- q->nelts--;
-
- return sid;
-}
-
-static void tq_grow(h2_int_queue *q, int nlen)
-{
- if (nlen > q->nalloc) {
- int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
- if (q->nelts > 0) {
- int l = ((q->head + q->nelts) % q->nalloc) - q->head;
-
- memmove(nq, q->elts + q->head, sizeof(int) * l);
- if (l < q->nelts) {
- /* elts wrapped, append elts in [0, remain] to nq */
- int remain = q->nelts - l;
- memmove(nq + l, q->elts, sizeof(int) * remain);
- }
- }
- q->elts = nq;
- q->nalloc = nlen;
- q->head = 0;
- }
-}
-
-static void tq_swap(h2_int_queue *q, int i, int j)
-{
- int x = q->elts[i];
- q->elts[i] = q->elts[j];
- q->elts[j] = x;
-}
-
-static int tq_bubble_up(h2_int_queue *q, int i, int top,
- h2_iq_cmp *cmp, void *ctx)
-{
- int prev;
- while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
- && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
- tq_swap(q, prev, i);
- i = prev;
- }
- return i;
-}
-
-static int tq_bubble_down(h2_int_queue *q, int i, int bottom,
- h2_iq_cmp *cmp, void *ctx)
-{
- int next;
- while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
- && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
- tq_swap(q, next, i);
- i = next;
- }
- return i;
-}
diff --git a/modules/http2/h2_int_queue.h b/modules/http2/h2_int_queue.h
deleted file mode 100644
index 69f1e1c9..00000000
--- a/modules/http2/h2_int_queue.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_int_queue__
-#define __mod_h2__h2_int_queue__
-
-/**
- * h2_int_queue keeps a list of sorted h2_task* in ascending order.
- */
-typedef struct h2_int_queue h2_int_queue;
-
-struct h2_int_queue {
- int *elts;
- int head;
- int nelts;
- int nalloc;
- apr_pool_t *pool;
-};
-
-/**
- * Comparator for two task to determine their order.
- *
- * @param s1 stream id to compare
- * @param s2 stream id to compare
- * @param ctx provided user data
- * @return value is the same as for strcmp() and has the effect:
- * == 0: s1 and s2 are treated equal in ordering
- * < 0: s1 should be sorted before s2
- * > 0: s2 should be sorted before s1
- */
-typedef int h2_iq_cmp(int s1, int s2, void *ctx);
-
-
-/**
- * Allocate a new queue from the pool and initialize.
- * @param id the identifier of the queue
- * @param pool the memory pool
- */
-h2_int_queue *h2_iq_create(apr_pool_t *pool, int capacity);
-
-/**
- * Return != 0 iff there are no tasks in the queue.
- * @param q the queue to check
- */
-int h2_iq_empty(h2_int_queue *q);
-
-/**
- * Return the number of int in the queue.
- * @param q the queue to get size on
- */
-int h2_iq_size(h2_int_queue *q);
-
-/**
- * Add a stream idto the queue.
- *
- * @param q the queue to append the task to
- * @param sid the stream id to add
- * @param cmp the comparator for sorting
- * @param ctx user data for comparator
- */
-void h2_iq_add(h2_int_queue *q, int sid, h2_iq_cmp *cmp, void *ctx);
-
-/**
- * Remove the stream id from the queue. Return != 0 iff task
- * was found in queue.
- * @param q the task queue
- * @param sid the stream id to remove
- * @return != 0 iff task was found in queue
- */
-int h2_iq_remove(h2_int_queue *q, int sid);
-
-/**
- * Remove all entries in the queue.
- */
-void h2_iq_clear(h2_int_queue *q);
-
-/**
- * Sort the stream idqueue again. Call if the task ordering
- * has changed.
- *
- * @param q the queue to sort
- * @param cmp the comparator for sorting
- * @param ctx user data for the comparator
- */
-void h2_iq_sort(h2_int_queue *q, h2_iq_cmp *cmp, void *ctx);
-
-/**
- * Get the first stream id from the queue or NULL if the queue is empty.
- * The task will be removed.
- *
- * @param q the queue to get the first task from
- * @return the first stream id of the queue, 0 if empty
- */
-int h2_iq_shift(h2_int_queue *q);
-
-#endif /* defined(__mod_h2__h2_int_queue__) */
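
The int queue whose files are removed here appears to live on as h2_iq (h2_mplx.c below still calls h2_iq_create()); it is a ring buffer of stream ids kept ordered via a caller-supplied comparator. Its core is the modular index arithmetic, shown in a stripped-down, unsorted sketch with illustrative names (error handling omitted).

    #include <stdlib.h>

    typedef struct {
        int *elts;   /* ring buffer storage */
        int head;    /* index of the first element */
        int nelts;   /* number of stored elements */
        int nalloc;  /* capacity of elts */
    } int_ring;

    static int_ring *ring_create(int capacity)
    {
        int_ring *q = calloc(1, sizeof(*q));
        q->elts = calloc(capacity, sizeof(int));
        q->nalloc = capacity;
        return q;
    }

    /* append a stream id at the logical end of the ring */
    static int ring_add(int_ring *q, int sid)
    {
        if (q->nelts >= q->nalloc) {
            return 0; /* full; the real queue grows from its pool instead */
        }
        q->elts[(q->head + q->nelts) % q->nalloc] = sid;
        ++q->nelts;
        return 1;
    }

    /* remove and return the first stream id, 0 if the ring is empty */
    static int ring_shift(int_ring *q)
    {
        int sid;
        if (q->nelts <= 0) {
            return 0;
        }
        sid = q->elts[q->head];
        q->head = (q->head + 1) % q->nalloc;
        --q->nelts;
        return sid;
    }
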
diff --git a/modules/http2/h2_io.c b/modules/http2/h2_io.c
deleted file mode 100644
index 5bbf09e9..00000000
--- a/modules/http2/h2_io.c
+++ /dev/null
@@ -1,453 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-
-#include <apr_pools.h>
-#include <apr_thread_mutex.h>
-#include <apr_thread_cond.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_log.h>
-#include <http_connection.h>
-#include <http_request.h>
-
-#include "h2_private.h"
-#include "h2_h2.h"
-#include "h2_io.h"
-#include "h2_mplx.h"
-#include "h2_response.h"
-#include "h2_request.h"
-#include "h2_task.h"
-#include "h2_util.h"
-
-h2_io *h2_io_create(int id, apr_pool_t *pool,
- apr_bucket_alloc_t *bucket_alloc,
- const h2_request *request)
-{
- h2_io *io = apr_pcalloc(pool, sizeof(*io));
- if (io) {
- io->id = id;
- io->pool = pool;
- io->bucket_alloc = bucket_alloc;
- io->request = h2_request_clone(pool, request);
- }
- return io;
-}
-
-static void check_bbin(h2_io *io)
-{
- if (!io->bbin) {
- io->bbin = apr_brigade_create(io->pool, io->bucket_alloc);
- }
-}
-
-static void check_bbout(h2_io *io)
-{
- if (!io->bbout) {
- io->bbout = apr_brigade_create(io->pool, io->bucket_alloc);
- }
-}
-
-static void check_bbtmp(h2_io *io)
-{
- if (!io->bbtmp) {
- io->bbtmp = apr_brigade_create(io->pool, io->bucket_alloc);
- }
-}
-
-static void append_eos(h2_io *io, apr_bucket_brigade *bb)
-{
- APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(io->bucket_alloc));
-}
-
-void h2_io_redo(h2_io *io)
-{
- io->worker_started = 0;
- io->response = NULL;
- io->rst_error = 0;
- if (io->bbin) {
- apr_brigade_cleanup(io->bbin);
- }
- if (io->bbout) {
- apr_brigade_cleanup(io->bbout);
- }
- if (io->bbtmp) {
- apr_brigade_cleanup(io->bbtmp);
- }
- io->started_at = io->done_at = 0;
-}
-
-int h2_io_is_repeatable(h2_io *io) {
- if (io->submitted
- || io->input_consumed > 0
- || !io->request) {
- /* cannot repeat that. */
- return 0;
- }
- return (!strcmp("GET", io->request->method)
- || !strcmp("HEAD", io->request->method)
- || !strcmp("OPTIONS", io->request->method));
-}
-
-void h2_io_set_response(h2_io *io, h2_response *response)
-{
- AP_DEBUG_ASSERT(io->pool);
- AP_DEBUG_ASSERT(response);
- AP_DEBUG_ASSERT(!io->response);
- /* we used to clone the response into the io->pool. But
- * we have much tighter control over the EOR bucket nowadays,
- * so just use the instance given */
- io->response = response;
- if (response->rst_error) {
- h2_io_rst(io, response->rst_error);
- }
- else if (response->content_length == 0) {
- io->eos_out = 1;
- }
-}
-
-void h2_io_rst(h2_io *io, int error)
-{
- io->rst_error = error;
- io->eos_in = 1;
-}
-
-int h2_io_out_has_data(h2_io *io)
-{
- return io->bbout && h2_util_bb_has_data_or_eos(io->bbout);
-}
-
-apr_off_t h2_io_out_length(h2_io *io)
-{
- if (io->bbout) {
- apr_off_t len = 0;
- apr_brigade_length(io->bbout, 0, &len);
- return (len > 0)? len : 0;
- }
- return 0;
-}
-
-apr_status_t h2_io_in_shutdown(h2_io *io)
-{
- if (io->bbin) {
- apr_off_t end_len = 0;
- apr_brigade_length(io->bbin, 1, &end_len);
- io->input_consumed += end_len;
- apr_brigade_cleanup(io->bbin);
- }
- return h2_io_in_close(io);
-}
-
-
-void h2_io_signal_init(h2_io *io, h2_io_op op, apr_interval_time_t timeout,
- apr_thread_cond_t *cond)
-{
- io->timed_op = op;
- io->timed_cond = cond;
- if (timeout > 0) {
- io->timeout_at = apr_time_now() + timeout;
- }
- else {
- io->timeout_at = 0;
- }
-}
-
-void h2_io_signal_exit(h2_io *io)
-{
- io->timed_cond = NULL;
- io->timeout_at = 0;
-}
-
-apr_status_t h2_io_signal_wait(h2_mplx *m, h2_io *io)
-{
- apr_status_t status;
-
- if (io->timeout_at != 0) {
- status = apr_thread_cond_timedwait(io->timed_cond, m->lock, io->timeout_at);
- if (APR_STATUS_IS_TIMEUP(status)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, APLOGNO(03055)
- "h2_mplx(%ld-%d): stream timeout expired: %s",
- m->id, io->id,
- (io->timed_op == H2_IO_READ)? "read" : "write");
- h2_io_rst(io, H2_ERR_CANCEL);
- }
- }
- else {
- apr_thread_cond_wait(io->timed_cond, m->lock);
- status = APR_SUCCESS;
- }
- if (io->orphaned && status == APR_SUCCESS) {
- return APR_ECONNABORTED;
- }
- return status;
-}
-
-void h2_io_signal(h2_io *io, h2_io_op op)
-{
- if (io->timed_cond && (io->timed_op == op || H2_IO_ANY == op)) {
- apr_thread_cond_signal(io->timed_cond);
- }
-}
-
-void h2_io_make_orphaned(h2_io *io, int error)
-{
- io->orphaned = 1;
- if (error) {
- h2_io_rst(io, error);
- }
- /* if someone is waiting, wake him up */
- h2_io_signal(io, H2_IO_ANY);
-}
-
-static int add_trailer(void *ctx, const char *key, const char *value)
-{
- apr_bucket_brigade *bb = ctx;
- apr_status_t status;
-
- status = apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n",
- key, value);
- return (status == APR_SUCCESS);
-}
-
-static apr_status_t in_append_eos(h2_io *io, apr_bucket_brigade *bb,
- apr_table_t *trailers)
-{
- apr_status_t status = APR_SUCCESS;
- apr_table_t *t = io->request->trailers;
-
- if (trailers && t && !apr_is_empty_table(trailers)) {
- /* trailers passed in, transfer directly. */
- apr_table_overlap(trailers, t, APR_OVERLAP_TABLES_SET);
- t = NULL;
- }
-
- if (io->request->chunked) {
- if (t && !apr_is_empty_table(t)) {
- /* no trailers passed in, transfer via chunked */
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
- apr_table_do(add_trailer, bb, t, NULL);
- status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
- }
- else {
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
- }
- }
- append_eos(io, bb);
- return status;
-}
-
-apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb,
- apr_size_t maxlen, apr_table_t *trailers)
-{
- apr_off_t start_len = 0;
- apr_status_t status;
-
- if (io->rst_error) {
- return APR_ECONNABORTED;
- }
-
- if (!io->bbin || APR_BRIGADE_EMPTY(io->bbin)) {
- if (io->eos_in) {
- if (!io->eos_in_written) {
- status = in_append_eos(io, bb, trailers);
- io->eos_in_written = 1;
- return status;
- }
- return APR_EOF;
- }
- return APR_EAGAIN;
- }
-
- if (io->request->chunked) {
- /* the reader expects HTTP/1.1 chunked encoding */
- check_bbtmp(io);
- status = h2_util_move(io->bbtmp, io->bbin, maxlen, NULL, "h2_io_in_read_chunk");
- if (status == APR_SUCCESS) {
- apr_off_t tmp_len = 0;
-
- apr_brigade_length(io->bbtmp, 1, &tmp_len);
- if (tmp_len > 0) {
- io->input_consumed += tmp_len;
- status = apr_brigade_printf(bb, NULL, NULL, "%lx\r\n",
- (unsigned long)tmp_len);
- if (status == APR_SUCCESS) {
- status = h2_util_move(bb, io->bbtmp, -1, NULL, "h2_io_in_read_tmp1");
- if (status == APR_SUCCESS) {
- status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
- }
- }
- }
- else {
- status = h2_util_move(bb, io->bbtmp, -1, NULL, "h2_io_in_read_tmp2");
- }
- apr_brigade_cleanup(io->bbtmp);
- }
- }
- else {
- apr_brigade_length(bb, 1, &start_len);
-
- status = h2_util_move(bb, io->bbin, maxlen, NULL, "h2_io_in_read");
- if (status == APR_SUCCESS) {
- apr_off_t end_len = 0;
- apr_brigade_length(bb, 1, &end_len);
- io->input_consumed += (end_len - start_len);
- }
- }
-
- if (status == APR_SUCCESS && (!io->bbin || APR_BRIGADE_EMPTY(io->bbin))) {
- if (io->eos_in) {
- if (!io->eos_in_written) {
- status = in_append_eos(io, bb, trailers);
- io->eos_in_written = 1;
- }
- }
- }
-
- if (status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) {
- return APR_EAGAIN;
- }
- return status;
-}
-
-apr_status_t h2_io_in_write(h2_io *io, const char *d, apr_size_t len, int eos)
-{
- if (io->rst_error) {
- return APR_ECONNABORTED;
- }
-
- if (io->eos_in) {
- return APR_EOF;
- }
- if (eos) {
- io->eos_in = 1;
- }
- if (len > 0) {
- check_bbin(io);
- return apr_brigade_write(io->bbin, NULL, NULL, d, len);
- }
- return APR_SUCCESS;
-}
-
-apr_status_t h2_io_in_close(h2_io *io)
-{
- if (io->rst_error) {
- return APR_ECONNABORTED;
- }
-
- io->eos_in = 1;
- return APR_SUCCESS;
-}
-
-apr_status_t h2_io_out_get_brigade(h2_io *io, apr_bucket_brigade *bb,
- apr_off_t len)
-{
- if (io->rst_error) {
- return APR_ECONNABORTED;
- }
- if (io->eos_out_read) {
- return APR_EOF;
- }
- else if (!io->bbout || APR_BRIGADE_EMPTY(io->bbout)) {
- return APR_EAGAIN;
- }
- else {
- apr_status_t status;
- apr_off_t pre_len, post_len;
- /* Allow file handles pass through without limits. If they
- * already have the lifetime of this stream, we might as well
- * pass them on to the master connection */
- apr_size_t files = INT_MAX;
-
- apr_brigade_length(bb, 0, &pre_len);
- status = h2_util_move(bb, io->bbout, len, &files, "h2_io_read_to");
- if (status == APR_SUCCESS && io->eos_out
- && APR_BRIGADE_EMPTY(io->bbout)) {
- io->eos_out_read = 1;
- }
- apr_brigade_length(bb, 0, &post_len);
- io->output_consumed += (post_len - pre_len);
- return status;
- }
-}
-
-apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
- apr_size_t maxlen,
- apr_size_t *pfile_buckets_allowed)
-{
- apr_status_t status;
- apr_bucket *b;
- int start_allowed;
-
- if (io->rst_error) {
- return APR_ECONNABORTED;
- }
-
- /* Filter the EOR bucket and set it aside. We prefer to tear down
- * the request when the whole h2 stream is done */
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b))
- {
- if (AP_BUCKET_IS_EOR(b)) {
- APR_BUCKET_REMOVE(b);
- io->eor = b;
- break;
- }
- else if (APR_BUCKET_IS_EOS(b)) {
- io->eos_out = 1;
- break;
- }
- }
-
- /* Let's move the buckets from the request processing in here, so
- * that the main thread can read them when it has time/capacity.
- *
- * Move at most "maxlen" memory bytes. If buckets remain, it is
- * the caller's responsibility to take care of this.
- *
- * We allow passing of file buckets as long as we do not have too
- * many open files already buffered. Otherwise we will run out of
- * file handles.
- */
- check_bbout(io);
- start_allowed = *pfile_buckets_allowed;
- status = h2_util_move(io->bbout, bb, maxlen, pfile_buckets_allowed,
- "h2_io_out_write");
- /* track # file buckets moved into our pool */
- if (start_allowed != *pfile_buckets_allowed) {
- io->files_handles_owned += (start_allowed - *pfile_buckets_allowed);
- }
- return status;
-}
-
-
-apr_status_t h2_io_out_close(h2_io *io)
-{
- if (io->rst_error) {
- return APR_ECONNABORTED;
- }
- if (!io->eos_out_read) { /* EOS has not been read yet */
- if (!io->eos_out) {
- check_bbout(io);
- io->eos_out = 1;
- if (!h2_util_has_eos(io->bbout, -1)) {
- append_eos(io, io->bbout);
- }
- }
- }
- return APR_SUCCESS;
-}
diff --git a/modules/http2/h2_io.h b/modules/http2/h2_io.h
deleted file mode 100644
index d700f6f3..00000000
--- a/modules/http2/h2_io.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_io__
-#define __mod_h2__h2_io__
-
-struct h2_response;
-struct apr_thread_cond_t;
-struct h2_mplx;
-struct h2_request;
-struct h2_task;
-
-
-typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
-
-typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx);
-
-typedef enum {
- H2_IO_READ,
- H2_IO_WRITE,
- H2_IO_ANY,
-} h2_io_op;
-
-typedef struct h2_io h2_io;
-
-struct h2_io {
- int id; /* stream identifier */
- apr_pool_t *pool; /* stream pool */
- apr_bucket_alloc_t *bucket_alloc;
-
- const struct h2_request *request;/* request on this io */
- struct h2_response *response; /* response to request */
- int rst_error; /* h2 related stream abort error */
-
- apr_bucket *eor; /* the EOR bucket, set aside */
- struct h2_task *task; /* the task once started */
-
- apr_bucket_brigade *bbin; /* input data for stream */
- apr_bucket_brigade *bbout; /* output data from stream */
- apr_bucket_brigade *bbtmp; /* temporary data for chunking */
-
- unsigned int orphaned : 1; /* h2_stream is gone for this io */
- unsigned int worker_started : 1; /* h2_worker started processing for this io */
- unsigned int worker_done : 1; /* h2_worker finished for this io */
- unsigned int submitted : 1; /* response has been submitted to client */
- unsigned int request_body : 1; /* iff request has body */
- unsigned int eos_in : 1; /* input eos has been seen */
- unsigned int eos_in_written : 1; /* input eos has been forwarded */
- unsigned int eos_out : 1; /* output eos is present */
- unsigned int eos_out_read : 1; /* output eos has been forwarded */
-
- h2_io_op timed_op; /* which operation is waited on, if any */
- struct apr_thread_cond_t *timed_cond; /* condition to wait on, maybe NULL */
- apr_time_t timeout_at; /* when IO wait will time out */
-
- apr_time_t started_at; /* when processing started */
- apr_time_t done_at; /* when processing was done */
- apr_size_t input_consumed; /* how many bytes have been read */
- apr_size_t output_consumed; /* how many bytes have been written out */
-
- int files_handles_owned;
-};
-
-/*******************************************************************************
- * Object lifecycle and information.
- ******************************************************************************/
-
-/**
- * Creates a new h2_io for the given stream id.
- */
-h2_io *h2_io_create(int id, apr_pool_t *pool,
- apr_bucket_alloc_t *bucket_alloc,
- const struct h2_request *request);
-
-/**
- * Set the response of this stream.
- */
-void h2_io_set_response(h2_io *io, struct h2_response *response);
-
-/**
- * Reset the stream with the given error code.
- */
-void h2_io_rst(h2_io *io, int error);
-
-int h2_io_is_repeatable(h2_io *io);
-void h2_io_redo(h2_io *io);
-
-/**
- * Output data is available.
- */
-int h2_io_out_has_data(h2_io *io);
-
-void h2_io_signal(h2_io *io, h2_io_op op);
-void h2_io_signal_init(h2_io *io, h2_io_op op, apr_interval_time_t timeout,
- struct apr_thread_cond_t *cond);
-void h2_io_signal_exit(h2_io *io);
-apr_status_t h2_io_signal_wait(struct h2_mplx *m, h2_io *io);
-
-void h2_io_make_orphaned(h2_io *io, int error);
-
-/*******************************************************************************
- * Input handling of streams.
- ******************************************************************************/
-/**
- * Reads the next bucket from the input. Returns APR_EAGAIN if none
- * is currently available, APR_EOF if end of input has been reached.
- */
-apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb,
- apr_size_t maxlen, apr_table_t *trailers);
-
-/**
- * Appends given bucket to the input.
- */
-apr_status_t h2_io_in_write(h2_io *io, const char *d, apr_size_t len, int eos);
-
-/**
- * Closes the input. After existing data has been read, APR_EOF will
- * be returned.
- */
-apr_status_t h2_io_in_close(h2_io *io);
-
-/**
- * Shuts all input down. Will close input and mark any data buffered
- * as consumed.
- */
-apr_status_t h2_io_in_shutdown(h2_io *io);
-
-/*******************************************************************************
- * Output handling of streams.
- ******************************************************************************/
-
-/**
- * Read a bucket from the output head. Return APR_EAGAIN if non is available,
- * APR_EOF if none available and output has been closed.
- * May be called with buffer == NULL in order to find out how much data
- * is available.
- * @param io the h2_io to read output from
- * @param buffer the buffer to copy the data to, may be NULL
- * @param plen the requested max len, set to amount of data on return
- * @param peos != 0 iff the end of stream has been reached
- */
-apr_status_t h2_io_out_get_brigade(h2_io *io,
- apr_bucket_brigade *bb,
- apr_off_t len);
-
-apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
- apr_size_t maxlen,
- apr_size_t *pfile_buckets_allowed);
-
-/**
- * Closes the input. After existing data has been read, APR_EOF will
- * be returned.
- */
-apr_status_t h2_io_out_close(h2_io *io);
-
-/**
- * Gives the overall length of the data that is currently queued for
- * output.
- */
-apr_off_t h2_io_out_length(h2_io *io);
-
-
-#endif /* defined(__mod_h2__h2_io__) */
diff --git a/modules/http2/h2_io_set.c b/modules/http2/h2_io_set.c
deleted file mode 100644
index e0949795..00000000
--- a/modules/http2/h2_io_set.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <apr_strings.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_connection.h>
-#include <http_log.h>
-
-#include "h2_private.h"
-#include "h2_io.h"
-#include "h2_io_set.h"
-
-#define h2_io_IDX(list, i) ((h2_io**)(list)->elts)[i]
-
-struct h2_io_set {
- apr_array_header_t *list;
-};
-
-h2_io_set *h2_io_set_create(apr_pool_t *pool)
-{
- h2_io_set *sp = apr_pcalloc(pool, sizeof(h2_io_set));
- if (sp) {
- sp->list = apr_array_make(pool, 100, sizeof(h2_io*));
- if (!sp->list) {
- return NULL;
- }
- }
- return sp;
-}
-
-static int h2_stream_id_cmp(const void *s1, const void *s2)
-{
- h2_io **pio1 = (h2_io **)s1;
- h2_io **pio2 = (h2_io **)s2;
- return (*pio1)->id - (*pio2)->id;
-}
-
-h2_io *h2_io_set_get(h2_io_set *sp, int stream_id)
-{
- /* we keep the array sorted by id, so lookup can be done
- * by bsearch.
- */
- h2_io **ps;
- h2_io key;
- h2_io *pkey = &key;
-
- memset(&key, 0, sizeof(key));
- key.id = stream_id;
- ps = bsearch(&pkey, sp->list->elts, sp->list->nelts,
- sp->list->elt_size, h2_stream_id_cmp);
- return ps? *ps : NULL;
-}
-
-static void h2_io_set_sort(h2_io_set *sp)
-{
- qsort(sp->list->elts, sp->list->nelts, sp->list->elt_size,
- h2_stream_id_cmp);
-}
-
-apr_status_t h2_io_set_add(h2_io_set *sp, h2_io *io)
-{
- h2_io *existing = h2_io_set_get(sp, io->id);
- if (!existing) {
- int last;
- APR_ARRAY_PUSH(sp->list, h2_io*) = io;
- /* Normally, streams get added in ascending order if id. We
- * keep the array sorted, so we just need to check if the newly
- * appended stream has a lower id than the last one. if not,
- * sorting is not necessary.
- */
- last = sp->list->nelts - 1;
- if (last > 0
- && (h2_io_IDX(sp->list, last)->id
- < h2_io_IDX(sp->list, last-1)->id)) {
- h2_io_set_sort(sp);
- }
- }
- return APR_SUCCESS;
-}
-
-static void remove_idx(h2_io_set *sp, int idx)
-{
- int n;
- --sp->list->nelts;
- n = sp->list->nelts - idx;
- if (n > 0) {
- /* There are n h2_io* behind idx. Move the rest down */
- h2_io **selts = (h2_io**)sp->list->elts;
- memmove(selts + idx, selts + idx + 1, n * sizeof(h2_io*));
- }
-}
-
-h2_io *h2_io_set_remove(h2_io_set *sp, h2_io *io)
-{
- int i;
- for (i = 0; i < sp->list->nelts; ++i) {
- h2_io *e = h2_io_IDX(sp->list, i);
- if (e->id == io->id) {
- remove_idx(sp, i);
- return e;
- }
- }
- return NULL;
-}
-
-h2_io *h2_io_set_shift(h2_io_set *set)
-{
- /* For now, this just removes the first element in the set.
- * the name is misleading...
- */
- if (set->list->nelts > 0) {
- h2_io *io = h2_io_IDX(set->list, 0);
- remove_idx(set, 0);
- return io;
- }
- return NULL;
-}
-
-int h2_io_set_is_empty(h2_io_set *sp)
-{
- AP_DEBUG_ASSERT(sp);
- return sp->list->nelts == 0;
-}
-
-int h2_io_set_iter(h2_io_set *sp,
- h2_io_set_iter_fn *iter, void *ctx)
-{
- int i;
- for (i = 0; i < sp->list->nelts; ++i) {
- h2_io *s = h2_io_IDX(sp->list, i);
- if (!iter(ctx, s)) {
- return 0;
- }
- }
- return 1;
-}
-
-apr_size_t h2_io_set_size(h2_io_set *sp)
-{
- return sp->list->nelts;
-}
-
diff --git a/modules/http2/h2_io_set.h b/modules/http2/h2_io_set.h
deleted file mode 100644
index 936e7252..00000000
--- a/modules/http2/h2_io_set.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_io_set__
-#define __mod_h2__h2_io_set__
-
-struct h2_io;
-
-/**
- * A set of h2_io instances. Allows lookup by stream id
- * and other criteria.
- */
-typedef struct h2_io_set h2_io_set;
-
-h2_io_set *h2_io_set_create(apr_pool_t *pool);
-
-apr_status_t h2_io_set_add(h2_io_set *set, struct h2_io *io);
-h2_io *h2_io_set_get(h2_io_set *set, int stream_id);
-h2_io *h2_io_set_remove(h2_io_set *set, struct h2_io *io);
-
-int h2_io_set_is_empty(h2_io_set *set);
-apr_size_t h2_io_set_size(h2_io_set *set);
-
-
-typedef int h2_io_set_iter_fn(void *ctx, struct h2_io *io);
-
-/**
- * Iterator over all h2_io* in the set or until a
- * callback returns 0. It is not safe to add or remove
- * set members during iteration.
- *
- * @param set the set of h2_io to iterate over
- * @param iter the function to call for each io
- * @param ctx user data for the callback
- * @return 1 iff iteration completed for all members
- */
-int h2_io_set_iter(h2_io_set *set, h2_io_set_iter_fn *iter, void *ctx);
-
-h2_io *h2_io_set_shift(h2_io_set *set);
-
-#endif /* defined(__mod_h2__h2_io_set__) */
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index a4dbf1f4..001eb7f6 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -29,38 +29,46 @@
#include "mod_http2.h"
#include "h2_private.h"
+#include "h2_bucket_beam.h"
#include "h2_config.h"
#include "h2_conn.h"
#include "h2_ctx.h"
#include "h2_h2.h"
-#include "h2_int_queue.h"
-#include "h2_io.h"
-#include "h2_io_set.h"
#include "h2_response.h"
#include "h2_mplx.h"
#include "h2_ngn_shed.h"
#include "h2_request.h"
#include "h2_stream.h"
#include "h2_task.h"
-#include "h2_task_input.h"
-#include "h2_task_output.h"
#include "h2_worker.h"
#include "h2_workers.h"
#include "h2_util.h"
-#define H2_MPLX_IO_OUT(lvl,m,io,msg) \
- do { \
- if (APLOG_C_IS_LEVEL((m)->c,lvl)) \
- h2_util_bb_log((m)->c,(io)->id,lvl,msg,(io)->bbout); \
- } while(0)
-
-#define H2_MPLX_IO_IN(lvl,m,io,msg) \
- do { \
- if (APLOG_C_IS_LEVEL((m)->c,lvl)) \
- h2_util_bb_log((m)->c,(io)->id,lvl,msg,(io)->bbin); \
- } while(0)
+static void h2_beam_log(h2_bucket_beam *beam, int id, const char *msg,
+ conn_rec *c, int level)
+{
+ if (beam && APLOG_C_IS_LEVEL(c,level)) {
+ char buffer[2048];
+ apr_size_t off = 0;
+
+ off += apr_snprintf(buffer+off, H2_ALEN(buffer)-off, "cl=%d, ", beam->closed);
+ off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "red", ", ", &beam->red);
+ off += h2_util_bb_print(buffer+off, H2_ALEN(buffer)-off, "green", ", ", beam->green);
+ off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "hold", ", ", &beam->hold);
+ off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "purge", "", &beam->purge);
+
+ ap_log_cerror(APLOG_MARK, level, 0, c, "beam(%ld-%d): %s %s",
+ c->id, id, msg, buffer);
+ }
+}
+/* utility for iterating over ihash task sets */
+typedef struct {
+ h2_mplx *m;
+ h2_task *task;
+ apr_time_t now;
+} task_iter_ctx;
/* NULL or the mutex held by this thread, used for recursive calls
*/
@@ -82,12 +90,14 @@ static apr_status_t enter_mutex(h2_mplx *m, int *pacquired)
* This allows recursive entering of the mutex from the same thread,
* which is what we need in certain situations involving callbacks
*/
+ AP_DEBUG_ASSERT(m);
apr_threadkey_private_get(&mutex, thread_lock);
if (mutex == m->lock) {
*pacquired = 0;
return APR_SUCCESS;
}
-
+
+ AP_DEBUG_ASSERT(m->lock);
status = apr_thread_mutex_lock(m->lock);
*pacquired = (status == APR_SUCCESS);
if (*pacquired) {
@@ -104,23 +114,68 @@ static void leave_mutex(h2_mplx *m, int acquired)
}
}
-static int is_aborted(h2_mplx *m, apr_status_t *pstatus)
+static void beam_leave(void *ctx, apr_thread_mutex_t *lock)
{
- AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- *pstatus = APR_ECONNABORTED;
+ leave_mutex(ctx, 1);
+}
+
+static apr_status_t beam_enter(void *ctx, h2_beam_lock *pbl)
+{
+ h2_mplx *m = ctx;
+ int acquired;
+ apr_status_t status;
+
+ status = enter_mutex(m, &acquired);
+ if (status == APR_SUCCESS) {
+ pbl->mutex = m->lock;
+ pbl->leave = acquired? beam_leave : NULL;
+ pbl->leave_ctx = m;
+ }
+ return status;
+}
+
+static void stream_output_consumed(void *ctx,
+ h2_bucket_beam *beam, apr_off_t length)
+{
+ h2_task *task = ctx;
+ if (length > 0 && task && task->assigned) {
+ h2_req_engine_out_consumed(task->assigned, task->c, length);
+ }
+}
+
+static void stream_input_consumed(void *ctx,
+ h2_bucket_beam *beam, apr_off_t length)
+{
+ h2_mplx *m = ctx;
+ if (m->input_consumed && length) {
+ m->input_consumed(m->input_consumed_ctx, beam->id, length);
+ }
+}
+
+static int can_beam_file(void *ctx, h2_bucket_beam *beam, apr_file_t *file)
+{
+ h2_mplx *m = ctx;
+ if (m->tx_handles_reserved > 0) {
+ --m->tx_handles_reserved;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld-%d): beaming file %s, tx_avail %d",
+ m->id, beam->id, beam->tag, m->tx_handles_reserved);
return 1;
}
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld-%d): can_beam_file denied on %s",
+ m->id, beam->id, beam->tag);
return 0;
}
static void have_out_data_for(h2_mplx *m, int stream_id);
+static void task_destroy(h2_mplx *m, h2_task *task, int called_from_master);
static void check_tx_reservation(h2_mplx *m)
{
- if (m->tx_handles_reserved == 0) {
+ if (m->tx_handles_reserved <= 0) {
m->tx_handles_reserved += h2_workers_tx_reserve(m->workers,
- H2MIN(m->tx_chunk_size, h2_io_set_size(m->stream_ios)));
+ H2MIN(m->tx_chunk_size, h2_ihash_count(m->tasks)));
}
}
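
enter_mutex()/leave_mutex() above avoid self-deadlock when mplx code is re-entered from a callback on the same thread: a thread-private key remembers which mutex the thread already holds, and beam_enter() builds on the same trick so that beam callbacks can nest. A minimal sketch of that pattern with plain pthreads; the names are illustrative, not the module's API.

    #include <pthread.h>

    static pthread_key_t lock_key;  /* remembers the mutex held by this thread */
    static pthread_once_t lock_key_once = PTHREAD_ONCE_INIT;

    static void make_lock_key(void)
    {
        pthread_key_create(&lock_key, NULL);
    }

    /* Take the lock unless this thread holds it already. *acquired tells the
     * caller whether leave_lock() must actually unlock. */
    static int enter_lock(pthread_mutex_t *lock, int *acquired)
    {
        int rv;

        pthread_once(&lock_key_once, make_lock_key);
        if (pthread_getspecific(lock_key) == lock) {
            *acquired = 0;  /* recursive call, e.g. from a callback */
            return 0;
        }
        rv = pthread_mutex_lock(lock);
        *acquired = (rv == 0);
        if (*acquired) {
            pthread_setspecific(lock_key, lock);
        }
        return rv;
    }

    static void leave_lock(pthread_mutex_t *lock, int acquired)
    {
        if (acquired) {
            pthread_setspecific(lock_key, NULL);
            pthread_mutex_unlock(lock);
        }
    }
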
@@ -131,19 +186,41 @@ static void check_tx_free(h2_mplx *m)
m->tx_handles_reserved = m->tx_chunk_size;
h2_workers_tx_free(m->workers, count);
}
- else if (m->tx_handles_reserved
- && (!m->stream_ios || h2_io_set_is_empty(m->stream_ios))) {
+ else if (m->tx_handles_reserved && h2_ihash_empty(m->tasks)) {
h2_workers_tx_free(m->workers, m->tx_handles_reserved);
m->tx_handles_reserved = 0;
}
}
+static int purge_stream(void *ctx, void *val)
+{
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ h2_task *task = h2_ihash_get(m->tasks, stream->id);
+ h2_ihash_remove(m->spurge, stream->id);
+ h2_stream_destroy(stream);
+ if (task) {
+ task_destroy(m, task, 1);
+ }
+ return 0;
+}
+
+static void purge_streams(h2_mplx *m)
+{
+ if (!h2_ihash_empty(m->spurge)) {
+ while(!h2_ihash_iter(m->spurge, purge_stream, m)) {
+ /* repeat until empty */
+ }
+ h2_ihash_clear(m->spurge);
+ }
+}
+
static void h2_mplx_destroy(h2_mplx *m)
{
AP_DEBUG_ASSERT(m);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): destroy, ios=%d",
- m->id, (int)h2_io_set_size(m->stream_ios));
+ "h2_mplx(%ld): destroy, tasks=%d",
+ m->id, (int)h2_ihash_count(m->tasks));
check_tx_free(m);
if (m->pool) {
apr_pool_destroy(m->pool);
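
purge_streams() above drains m->spurge by restarting the iteration whenever the callback removes an entry and returns 0, since removing members invalidates the walk in progress. The same drain-by-restart idiom on a trivial array-backed set, with made-up names:

    #include <stddef.h>

    typedef int set_iter_fn(void *ctx, void *val); /* return 0 to stop the walk */

    typedef struct {
        void *elts[16];
        size_t nelts;
    } tiny_set;

    /* returns 1 iff the callback ran for all members without stopping */
    static int set_iter(tiny_set *s, set_iter_fn *iter, void *ctx)
    {
        size_t i;
        for (i = 0; i < s->nelts; ++i) {
            if (!iter(ctx, s->elts[i])) {
                return 0;
            }
        }
        return 1;
    }

    static void set_remove(tiny_set *s, void *val)
    {
        size_t i;
        for (i = 0; i < s->nelts; ++i) {
            if (s->elts[i] == val) {
                s->elts[i] = s->elts[--s->nelts]; /* swap-remove */
                return;
            }
        }
    }

    /* dispose of one element, remove it and abort the walk, because the
     * removal invalidates the ongoing iteration */
    static int purge_one(void *ctx, void *val)
    {
        tiny_set *s = ctx;
        set_remove(s, val);
        /* ... destroy val here ... */
        return 0;
    }

    /* drain the set: restart until an iteration completes without removals */
    static void purge_all(tiny_set *s)
    {
        while (!set_iter(s, purge_one, s)) {
            /* repeat until empty */
        }
    }
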
@@ -204,9 +281,15 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
m->bucket_alloc = apr_bucket_alloc_create(m->pool);
m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
+
+ m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->q = h2_iq_create(m->pool, m->max_streams);
- m->stream_ios = h2_io_set_create(m->pool);
- m->ready_ios = h2_io_set_create(m->pool);
+ m->sready = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->sresume = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id));
+
m->stream_timeout = stream_timeout;
m->workers = workers;
m->workers_max = workers->max_workers;
@@ -240,75 +323,66 @@ apr_uint32_t h2_mplx_shutdown(h2_mplx *m)
return max_stream_started;
}
-static void workers_register(h2_mplx *m)
-{
- /* h2_workers is only a hub for all the h2_worker instances.
- * At the end-of-life of this h2_mplx, we always unregister at
- * the workers. The thing to manage are all the h2_worker instances
- * out there. Those may hold a reference to this h2_mplx and we cannot
- * call them to unregister.
- *
- * Therefore: ref counting for h2_workers in not needed, ref counting
- * for h2_worker using this is critical.
- */
- m->need_registration = 0;
- h2_workers_register(m->workers, m);
-}
-
-static int io_in_consumed_signal(h2_mplx *m, h2_io *io)
+static void input_consumed_signal(h2_mplx *m, h2_stream *stream)
{
- if (io->input_consumed && m->input_consumed) {
- m->input_consumed(m->input_consumed_ctx,
- io->id, io->input_consumed);
- io->input_consumed = 0;
- return 1;
+ if (stream->input && stream->started) {
+ h2_beam_send(stream->input, NULL, 0); /* trigger updates */
}
- return 0;
}
-static int io_out_consumed_signal(h2_mplx *m, h2_io *io)
+static int output_consumed_signal(h2_mplx *m, h2_task *task)
{
- if (io->output_consumed && io->task && io->task->assigned) {
- h2_req_engine_out_consumed(io->task->assigned, io->task->c,
- io->output_consumed);
- io->output_consumed = 0;
- return 1;
+ if (task->output.beam && task->worker_started && task->assigned) {
+ /* trigger updates */
+ h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
}
return 0;
}
-static void io_destroy(h2_mplx *m, h2_io *io, int events)
+
+static void task_destroy(h2_mplx *m, h2_task *task, int called_from_master)
{
- int reuse_slave;
+ conn_rec *slave = NULL;
+ int reuse_slave = 0;
+ apr_status_t status;
- /* cleanup any buffered input */
- h2_io_in_shutdown(io);
- if (events) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_task(%s): destroy", task->id);
+ if (called_from_master) {
/* Process outstanding events before destruction */
- io_in_consumed_signal(m, io);
+ h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
+ if (stream) {
+ input_consumed_signal(m, stream);
+ }
}
/* The pool is cleared/destroyed which also closes all
* allocated file handles. Give this count back to our
* file handle pool. */
- m->tx_handles_reserved += io->files_handles_owned;
-
- h2_io_set_remove(m->stream_ios, io);
- h2_io_set_remove(m->ready_ios, io);
- if (m->redo_ios) {
- h2_io_set_remove(m->redo_ios, io);
+ if (task->output.beam) {
+ m->tx_handles_reserved +=
+ h2_beam_get_files_beamed(task->output.beam);
+ h2_beam_on_produced(task->output.beam, NULL, NULL);
+ status = h2_beam_shutdown(task->output.beam, APR_NONBLOCK_READ, 1);
+ if (status != APR_SUCCESS){
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, m->c,
+ APLOGNO(03385) "h2_task(%s): output shutdown "
+ "incomplete", task->id);
+ }
}
-
+
+ slave = task->c;
reuse_slave = ((m->spare_slaves->nelts < m->spare_slaves->nalloc)
- && !io->rst_error && io->eor);
- if (io->task) {
- conn_rec *slave = io->task->c;
- h2_task_destroy(io->task);
- io->task = NULL;
-
+ && !task->rst_error);
+
+ h2_ihash_remove(m->tasks, task->stream_id);
+ if (m->redo_tasks) {
+ h2_ihash_remove(m->redo_tasks, task->stream_id);
+ }
+ h2_task_destroy(task);
+
+ if (slave) {
if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
- apr_bucket_delete(io->eor);
- io->eor = NULL;
APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
}
else {
@@ -316,59 +390,104 @@ static void io_destroy(h2_mplx *m, h2_io *io, int events)
h2_slave_destroy(slave, NULL);
}
}
-
- if (io->pool) {
- apr_pool_destroy(io->pool);
- }
-
+
check_tx_free(m);
}
-static int io_stream_done(h2_mplx *m, h2_io *io, int rst_error)
+static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error)
{
- /* Remove io from ready set, we will never submit it */
- h2_io_set_remove(m->ready_ios, io);
- if (!io->worker_started || io->worker_done) {
- /* already finished or not even started yet */
- h2_iq_remove(m->q, io->id);
- io_destroy(m, io, 1);
- return 0;
+ h2_task *task;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_stream(%ld-%d): done", m->c->id, stream->id);
+ /* Situation: we are, on the master connection, done with processing
+ * the stream. Either we have handled it successfully, or the stream
+ * was reset by the client or the connection is gone and we are
+ * shutting down the whole session.
+ *
+ * We possibly have created a task for this stream to be processed
+ * on a slave connection. The processing might actually be ongoing
+ * right now or has already finished. A finished task waits for its
+ * stream to be done. This is the common case.
+ *
+ * If the stream had input (e.g. the request had a body), a task
+ * may have read, or is still reading buckets from the input beam.
+ * This means that the task is referencing memory from the stream's
+ * pool (or the master connection bucket alloc). Before we can free
+ * the stream pool, we need to make sure that those references are
+ * gone. This is what h2_beam_shutdown() on the input waits for.
+ *
+ * With the input handled, we can tear down that beam and care
+ * about the output beam. The stream might still have buffered some
+ * buckets read from the output, so we need to get rid of those. That
+ * is done by h2_stream_cleanup().
+ *
+ * Now it is safe to destroy the task (if it exists and is finished).
+ *
+ * FIXME: we currently destroy the stream, even if the task is still
+ * ongoing. This is not ok, since task->request is coming from stream
+ * memory. We should either copy it on task creation or wait with the
+ * stream destruction until the task is done.
+ */
+ h2_iq_remove(m->q, stream->id);
+ h2_ihash_remove(m->sready, stream->id);
+ h2_ihash_remove(m->sresume, stream->id);
+ h2_ihash_remove(m->streams, stream->id);
+ if (stream->input) {
+ m->tx_handles_reserved += h2_beam_get_files_beamed(stream->input);
+ h2_beam_on_consumed(stream->input, NULL, NULL);
+ /* Let anyone blocked reading know that there is no more to come */
+ h2_beam_abort(stream->input);
+ /* Remove mutex after, so that abort still finds cond to signal */
+ h2_beam_mutex_set(stream->input, NULL, NULL, NULL);
}
- else {
- /* cleanup once task is done */
- h2_io_make_orphaned(io, rst_error);
- return 1;
+ h2_stream_cleanup(stream);
+
+ task = h2_ihash_get(m->tasks, stream->id);
+ if (task) {
+ if (!task->worker_done) {
+ /* task still running, cleanup once it is done */
+ if (rst_error) {
+ h2_task_rst(task, rst_error);
+ }
+ h2_ihash_add(m->shold, stream);
+ return;
+ }
+ else {
+ /* already finished */
+ task_destroy(m, task, 0);
+ }
}
+ h2_stream_destroy(stream);
}
-static int stream_done_iter(void *ctx, h2_io *io)
+static int stream_done_iter(void *ctx, void *val)
{
- return io_stream_done((h2_mplx*)ctx, io, 0);
+ stream_done((h2_mplx*)ctx, val, 0);
+ return 0;
}
-static int stream_print(void *ctx, h2_io *io)
+static int task_print(void *ctx, void *val)
{
h2_mplx *m = ctx;
- if (io && io->request) {
+ h2_task *task = val;
+
+ if (task && task->request) {
+ h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
+
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
- "->03198: h2_stream(%ld-%d): %s %s %s -> %s %d"
- "[orph=%d/started=%d/done=%d/eos_in=%d/eos_out=%d]",
- m->id, io->id,
- io->request->method, io->request->authority, io->request->path,
- io->response? "http" : (io->rst_error? "reset" : "?"),
- io->response? io->response->http_status : io->rst_error,
- io->orphaned, io->worker_started, io->worker_done,
- io->eos_in, io->eos_out);
- }
- else if (io) {
+ "->03198: h2_stream(%s): %s %s %s -> %s %d"
+ "[orph=%d/started=%d/done=%d]",
+ task->id, task->request->method,
+ task->request->authority, task->request->path,
+ task->response? "http" : (task->rst_error? "reset" : "?"),
+ task->response? task->response->http_status : task->rst_error,
+ (stream? 0 : 1), task->worker_started,
+ task->worker_done);
+ }
+ else if (task) {
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
- "->03198: h2_stream(%ld-%d): NULL -> %s %d"
- "[orph=%d/started=%d/done=%d/eos_in=%d/eos_out=%d]",
- m->id, io->id,
- io->response? "http" : (io->rst_error? "reset" : "?"),
- io->response? io->response->http_status : io->rst_error,
- io->orphaned, io->worker_started, io->worker_done,
- io->eos_in, io->eos_out);
+ "->03198: h2_stream(%ld-%d): NULL", m->id, task->stream_id);
}
else {
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
@@ -377,6 +496,32 @@ static int stream_print(void *ctx, h2_io *io)
return 1;
}
+static int task_abort_connection(void *ctx, void *val)
+{
+ h2_task *task = val;
+ if (task->c) {
+ task->c->aborted = 1;
+ }
+ if (task->input.beam) {
+ h2_beam_abort(task->input.beam);
+ }
+ if (task->output.beam) {
+ h2_beam_abort(task->output.beam);
+ }
+ return 1;
+}
+
+static int report_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld-%d): exists, started=%d, scheduled=%d, "
+ "submitted=%d, suspended=%d",
+ m->id, stream->id, stream->started, stream->scheduled,
+ stream->submitted, stream->suspended);
+ return 1;
+}
+
apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
{
apr_status_t status;
@@ -386,30 +531,62 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
int i, wait_secs = 5;
+
+ if (!h2_ihash_empty(m->streams) && APLOGctrace1(m->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): release_join with %d streams open, "
+ "%d streams resume, %d streams ready, %d tasks",
+ m->id, (int)h2_ihash_count(m->streams),
+ (int)h2_ihash_count(m->sresume),
+ (int)h2_ihash_count(m->sready),
+ (int)h2_ihash_count(m->tasks));
+ h2_ihash_iter(m->streams, report_stream_iter, m);
+ }
/* disable WINDOW_UPDATE callbacks */
h2_mplx_set_consumed_cb(m, NULL, NULL);
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): start release_join with %d streams in hold",
+ m->id, (int)h2_ihash_count(m->shold));
+ }
+ if (!h2_ihash_empty(m->spurge)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): start release_join with %d streams to purge",
+ m->id, (int)h2_ihash_count(m->spurge));
+ }
+
h2_iq_clear(m->q);
apr_thread_cond_broadcast(m->task_thawed);
- while (!h2_io_set_iter(m->stream_ios, stream_done_iter, m)) {
- /* iterate until all ios have been orphaned or destroyed */
+ while (!h2_ihash_iter(m->streams, stream_done_iter, m)) {
+ /* iterate until all streams have been removed */
}
+ AP_DEBUG_ASSERT(h2_ihash_empty(m->streams));
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): 2. release_join with %d streams in hold",
+ m->id, (int)h2_ihash_count(m->shold));
+ }
+ if (!h2_ihash_empty(m->spurge)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): 2. release_join with %d streams to purge",
+ m->id, (int)h2_ihash_count(m->spurge));
+ }
+
/* If we still have busy workers, we cannot release our memory
- * pool yet, as slave connections have child pools of their respective
- * h2_io's.
- * Any remaining ios are processed in these workers. Any operation
- * they do on their input/outputs will be errored ECONNRESET/ABORTED,
- * so processing them should fail and workers *should* return.
+ * pool yet, as tasks have references to us.
+ * Any operation on a task's slave connection will from now on
+ * fail with ECONNRESET/ABORTED, so processing should error out
+ * and workers *should* return in a timely fashion.
*/
for (i = 0; m->workers_busy > 0; ++i) {
+ h2_ihash_iter(m->tasks, task_abort_connection, m);
+
m->join_wait = wait;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): release_join, waiting on %d worker to report back",
- m->id, (int)h2_io_set_size(m->stream_ios));
-
status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs));
+
if (APR_STATUS_IS_TIMEUP(status)) {
if (i > 0) {
/* Oh, oh. Still we wait for assigned workers to report that
@@ -419,11 +596,11 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
*/
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03198)
"h2_mplx(%ld): release, waiting for %d seconds now for "
- "%d h2_workers to return, have still %d requests outstanding",
+ "%d h2_workers to return, have still %d tasks outstanding",
m->id, i*wait_secs, m->workers_busy,
- (int)h2_io_set_size(m->stream_ios));
+ (int)h2_ihash_count(m->tasks));
if (i == 1) {
- h2_io_set_iter(m->stream_ios, stream_print, m);
+ h2_ihash_iter(m->tasks, task_print, m);
}
}
h2_mplx_abort(m);
@@ -431,13 +608,21 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
}
}
- if (!h2_io_set_is_empty(m->stream_ios)) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
- "h2_mplx(%ld): release_join, %d streams still open",
- m->id, (int)h2_io_set_size(m->stream_ios));
+ AP_DEBUG_ASSERT(h2_ihash_empty(m->shold));
+ if (!h2_ihash_empty(m->spurge)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): 3. release_join %d streams to purge",
+ m->id, (int)h2_ihash_count(m->spurge));
+ purge_streams(m);
+ }
+ AP_DEBUG_ASSERT(h2_ihash_empty(m->spurge));
+
+ if (!h2_ihash_empty(m->tasks)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03056)
+ "h2_mplx(%ld): release_join -> destroy, "
+ "%d tasks still present",
+ m->id, (int)h2_ihash_count(m->tasks));
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03056)
- "h2_mplx(%ld): release_join -> destroy", m->id);
leave_mutex(m, acquired);
h2_mplx_destroy(m);
/* all gone */
@@ -457,112 +642,18 @@ void h2_mplx_abort(h2_mplx *m)
}
}
-apr_status_t h2_mplx_stream_done(h2_mplx *m, int stream_id, int rst_error)
+apr_status_t h2_mplx_stream_done(h2_mplx *m, h2_stream *stream)
{
apr_status_t status = APR_SUCCESS;
int acquired;
- /* This maybe called from inside callbacks that already hold the lock.
- * E.g. when we are streaming out DATA and the EOF triggers the stream
- * release.
- */
AP_DEBUG_ASSERT(m);
if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
-
- /* there should be an h2_io, once the stream has been scheduled
- * for processing, e.g. when we received all HEADERs. But when
- * a stream is cancelled very early, it will not exist. */
- if (io) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld-%d): marking stream as done.",
- m->id, stream_id);
- io_stream_done(m, io, rst_error);
- }
- leave_mutex(m, acquired);
- }
- return status;
-}
-
-apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block,
- int stream_id, apr_bucket_brigade *bb,
- apr_table_t *trailers,
- struct apr_thread_cond_t *iowait)
-{
- apr_status_t status;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_read_pre");
-
- h2_io_signal_init(io, H2_IO_READ, m->stream_timeout, iowait);
- status = h2_io_in_read(io, bb, -1, trailers);
- while (APR_STATUS_IS_EAGAIN(status)
- && !is_aborted(m, &status)
- && block == APR_BLOCK_READ) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
- "h2_mplx(%ld-%d): wait on in data (BLOCK_READ)",
- m->id, stream_id);
- status = h2_io_signal_wait(m, io);
- if (status == APR_SUCCESS) {
- status = h2_io_in_read(io, bb, -1, trailers);
- }
- }
- H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_read_post");
- h2_io_signal_exit(io);
- }
- else {
- status = APR_EOF;
- }
- leave_mutex(m, acquired);
- }
- return status;
-}
-
-apr_status_t h2_mplx_in_write(h2_mplx *m, int stream_id,
- const char *data, apr_size_t len, int eos)
-{
- apr_status_t status;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_write_pre");
- status = h2_io_in_write(io, data, len, eos);
- H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_write_post");
- h2_io_signal(io, H2_IO_READ);
- io_in_consumed_signal(m, io);
- }
- else {
- status = APR_ECONNABORTED;
- }
- leave_mutex(m, acquired);
- }
- return status;
-}
-
-apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id)
-{
- apr_status_t status;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- status = h2_io_in_close(io);
- H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_close");
- h2_io_signal(io, H2_IO_READ);
- io_in_consumed_signal(m, io);
- }
- else {
- status = APR_ECONNABORTED;
- }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld-%d): marking stream as done.",
+ m->id, stream->id);
+ stream_done(m, stream, stream->rst_error);
+ purge_streams(m);
leave_mutex(m, acquired);
}
return status;
@@ -574,215 +665,42 @@ void h2_mplx_set_consumed_cb(h2_mplx *m, h2_mplx_consumed_cb *cb, void *ctx)
m->input_consumed_ctx = ctx;
}
-typedef struct {
- h2_mplx * m;
- int streams_updated;
-} update_ctx;
-
-static int update_window(void *ctx, h2_io *io)
-{
- update_ctx *uctx = (update_ctx*)ctx;
- if (io_in_consumed_signal(uctx->m, io)) {
- ++uctx->streams_updated;
- }
- return 1;
-}
-
-apr_status_t h2_mplx_in_update_windows(h2_mplx *m)
+static apr_status_t out_open(h2_mplx *m, int stream_id, h2_response *response)
{
- apr_status_t status;
- int acquired;
+ apr_status_t status = APR_SUCCESS;
+ h2_task *task = h2_ihash_get(m->tasks, stream_id);
+ h2_stream *stream = h2_ihash_get(m->streams, stream_id);
- AP_DEBUG_ASSERT(m);
- if (m->aborted) {
+ if (!task || !stream) {
return APR_ECONNABORTED;
}
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- update_ctx ctx;
-
- ctx.m = m;
- ctx.streams_updated = 0;
-
- status = APR_EAGAIN;
- h2_io_set_iter(m->stream_ios, update_window, &ctx);
-
- if (ctx.streams_updated) {
- status = APR_SUCCESS;
- }
- leave_mutex(m, acquired);
- }
- return status;
-}
-
-apr_status_t h2_mplx_out_get_brigade(h2_mplx *m, int stream_id,
- apr_bucket_brigade *bb,
- apr_off_t len, apr_table_t **ptrailers)
-{
- apr_status_t status;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_get_brigade_pre");
-
- status = h2_io_out_get_brigade(io, bb, len);
-
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_get_brigade_post");
- if (status == APR_SUCCESS) {
- h2_io_signal(io, H2_IO_WRITE);
- }
- }
- else {
- status = APR_ECONNABORTED;
- }
- *ptrailers = io->response? io->response->trailers : NULL;
- leave_mutex(m, acquired);
- }
- return status;
-}
-
-h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_ihash_t *streams)
-{
- apr_status_t status;
- h2_stream *stream = NULL;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_shift(m->ready_ios);
- if (io && !m->aborted) {
- stream = h2_ihash_get(streams, io->id);
- if (stream) {
- io->submitted = 1;
- if (io->rst_error) {
- h2_stream_rst(stream, io->rst_error);
- }
- else {
- AP_DEBUG_ASSERT(io->response);
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_next_submit_pre");
- h2_stream_set_response(stream, io->response, io->bbout);
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_next_submit_post");
- }
- }
- else {
- /* We have the io ready, but the stream has gone away, maybe
- * reset by the client. Should no longer happen since such
- * streams should clear io's from the ready queue.
- */
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03347)
- "h2_mplx(%ld): stream for response %d closed, "
- "resetting io to close request processing",
- m->id, io->id);
- h2_io_make_orphaned(io, H2_ERR_STREAM_CLOSED);
- if (!io->worker_started || io->worker_done) {
- io_destroy(m, io, 1);
- }
- else {
- /* hang around until the h2_task is done, but
- * shutdown input and send out any events (e.g. window
- * updates) asap. */
- h2_io_in_shutdown(io);
- io_in_consumed_signal(m, io);
- }
- }
-
- h2_io_signal(io, H2_IO_WRITE);
- }
- leave_mutex(m, acquired);
- }
- return stream;
-}
-
-static apr_status_t out_write(h2_mplx *m, h2_io *io,
- ap_filter_t* f, int blocking,
- apr_bucket_brigade *bb,
- struct apr_thread_cond_t *iowait)
-{
- apr_status_t status = APR_SUCCESS;
- /* We check the memory footprint queued for this stream_id
- * and block if it exceeds our configured limit.
- * We will not split buckets to enforce the limit to the last
- * byte. After all, the bucket is already in memory.
- */
- while (status == APR_SUCCESS
- && !APR_BRIGADE_EMPTY(bb)
- && !is_aborted(m, &status)) {
-
- status = h2_io_out_write(io, bb, blocking? m->stream_max_mem : INT_MAX,
- &m->tx_handles_reserved);
- io_out_consumed_signal(m, io);
-
- /* Wait for data to drain until there is room again or
- * stream timeout expires */
- h2_io_signal_init(io, H2_IO_WRITE, m->stream_timeout, iowait);
- while (status == APR_SUCCESS
- && !APR_BRIGADE_EMPTY(bb)
- && iowait
- && (m->stream_max_mem <= h2_io_out_length(io))
- && !is_aborted(m, &status)) {
- if (!blocking) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_mplx(%ld-%d): incomplete write",
- m->id, io->id);
- return APR_INCOMPLETE;
- }
- if (f) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_mplx(%ld-%d): waiting for out drain",
- m->id, io->id);
- }
- status = h2_io_signal_wait(m, io);
- }
- h2_io_signal_exit(io);
- }
- apr_brigade_cleanup(bb);
- return status;
-}
-
-static apr_status_t out_open(h2_mplx *m, int stream_id, h2_response *response,
- ap_filter_t* f, apr_bucket_brigade *bb,
- struct apr_thread_cond_t *iowait)
-{
- apr_status_t status = APR_SUCCESS;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%s): open response: %d, rst=%d",
+ task->id, response->http_status, response->rst_error);
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- if (f) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_mplx(%ld-%d): open response: %d, rst=%d",
- m->id, stream_id, response->http_status,
- response->rst_error);
- }
-
- h2_io_set_response(io, response);
- h2_io_set_add(m->ready_ios, io);
- if (response && response->http_status < 300) {
- /* we might see some file buckets in the output, see
- * if we have enough handles reserved. */
- check_tx_reservation(m);
- }
- if (bb) {
- status = out_write(m, io, f, 0, bb, iowait);
- if (status == APR_INCOMPLETE) {
- /* write will have transferred as much data as possible.
- caller has to deal with non-empty brigade */
- status = APR_SUCCESS;
- }
- }
- have_out_data_for(m, stream_id);
+ h2_task_set_response(task, response);
+
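+ /* wire up the task's output beam: buffer limit, timeout and
+ * consumed/file callbacks, protected by the mplx mutex */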
+ if (task->output.beam) {
+ h2_beam_buffer_size_set(task->output.beam, m->stream_max_mem);
+ h2_beam_timeout_set(task->output.beam, m->stream_timeout);
+ h2_beam_on_consumed(task->output.beam, stream_output_consumed, task);
+ m->tx_handles_reserved -= h2_beam_get_files_beamed(task->output.beam);
+ h2_beam_on_file_beam(task->output.beam, can_beam_file, m);
+ h2_beam_mutex_set(task->output.beam, beam_enter, task->cond, m);
}
- else {
- status = APR_ECONNABORTED;
+
+ h2_ihash_add(m->sready, stream);
+ if (response && response->http_status < 300) {
+ /* we might see some file buckets in the output, see
+ * if we have enough handles reserved. */
+ check_tx_reservation(m);
}
+ have_out_data_for(m, stream_id);
return status;
}
-apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response,
- ap_filter_t* f, apr_bucket_brigade *bb,
- struct apr_thread_cond_t *iowait)
+apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response)
{
apr_status_t status;
int acquired;
@@ -793,125 +711,47 @@ apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response,
status = APR_ECONNABORTED;
}
else {
- status = out_open(m, stream_id, response, f, bb, iowait);
- if (APLOGctrace1(m->c)) {
- h2_util_bb_log(m->c, stream_id, APLOG_TRACE1, "h2_mplx_out_open", bb);
- }
+ status = out_open(m, stream_id, response);
}
leave_mutex(m, acquired);
}
return status;
}
-apr_status_t h2_mplx_out_write(h2_mplx *m, int stream_id,
- ap_filter_t* f, int blocking,
- apr_bucket_brigade *bb,
- struct apr_thread_cond_t *iowait)
+static apr_status_t out_close(h2_mplx *m, h2_task *task)
{
- apr_status_t status;
- int acquired;
+ apr_status_t status = APR_SUCCESS;
+ h2_stream *stream;
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- status = out_write(m, io, f, blocking, bb, iowait);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
- "h2_mplx(%ld-%d): write", m->id, io->id);
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_write");
-
- have_out_data_for(m, stream_id);
- }
- else {
- status = APR_ECONNABORTED;
- }
- leave_mutex(m, acquired);
+ if (!task) {
+ return APR_ECONNABORTED;
}
- return status;
-}
-apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id)
-{
- apr_status_t status;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- if (!io->response && !io->rst_error) {
- /* In case a close comes before a response was created,
- * insert an error one so that our streams can properly
- * reset.
- */
- h2_response *r = h2_response_die(stream_id, APR_EGENERAL,
- io->request, m->pool);
- status = out_open(m, stream_id, r, NULL, NULL, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c,
- "h2_mplx(%ld-%d): close, no response, no rst",
- m->id, io->id);
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
- "h2_mplx(%ld-%d): close with eor=%s",
- m->id, io->id, io->eor? "yes" : "no");
- status = h2_io_out_close(io);
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_close");
- io_out_consumed_signal(m, io);
-
- have_out_data_for(m, stream_id);
- }
- else {
- status = APR_ECONNABORTED;
- }
- leave_mutex(m, acquired);
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (!stream) {
+ return APR_ECONNABORTED;
}
- return status;
-}
-apr_status_t h2_mplx_out_rst(h2_mplx *m, int stream_id, int error)
-{
- apr_status_t status;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->rst_error && !io->orphaned) {
- h2_io_rst(io, error);
- if (!io->response) {
- h2_io_set_add(m->ready_ios, io);
- }
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_rst");
-
- have_out_data_for(m, stream_id);
- h2_io_signal(io, H2_IO_WRITE);
- }
- else {
- status = APR_ECONNABORTED;
- }
- leave_mutex(m, acquired);
+ if (!task->response && !task->rst_error) {
+ /* In case a close comes before a response was created,
+ * insert an error one so that our streams can properly reset.
+ */
+ h2_response *r = h2_response_die(task->stream_id, 500,
+ task->request, m->pool);
+ status = out_open(m, task->stream_id, r);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, APLOGNO(03393)
+ "h2_mplx(%s): close, no response, no rst", task->id);
}
- return status;
-}
-
-int h2_mplx_out_has_data_for(h2_mplx *m, int stream_id)
-{
- apr_status_t status;
- int has_data = 0;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- has_data = h2_io_out_has_data(io);
- }
- else {
- has_data = 0;
- }
- leave_mutex(m, acquired);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
+ "h2_mplx(%s): close", task->id);
+ if (task->output.beam) {
+ status = h2_beam_close(task->output.beam);
+ h2_beam_log(task->output.beam, task->stream_id, "out_close", m->c,
+ APLOG_TRACE2);
}
- return has_data;
+ output_consumed_signal(m, task);
+ have_out_data_for(m, task->stream_id);
+ return status;
}
apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
@@ -925,7 +765,11 @@ apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
if (m->aborted) {
status = APR_ECONNABORTED;
}
+ else if (!h2_ihash_empty(m->sready) || !h2_ihash_empty(m->sresume)) {
+ status = APR_SUCCESS;
+ }
else {
+ purge_streams(m);
m->added_output = iowait;
status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
if (APLOGctrace2(m->c)) {
@@ -969,22 +813,7 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
return status;
}
-static h2_io *open_io(h2_mplx *m, int stream_id, const h2_request *request)
-{
- apr_pool_t *io_pool;
- h2_io *io;
-
- apr_pool_create(&io_pool, m->pool);
- apr_pool_tag(io_pool, "h2_io");
- io = h2_io_create(stream_id, io_pool, m->bucket_alloc, request);
- h2_io_set_add(m->stream_ios, io);
-
- return io;
-}
-
-
-apr_status_t h2_mplx_process(h2_mplx *m, int stream_id,
- const h2_request *req,
+apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
h2_stream_pri_cmp *cmp, void *ctx)
{
apr_status_t status;
@@ -997,24 +826,32 @@ apr_status_t h2_mplx_process(h2_mplx *m, int stream_id,
status = APR_ECONNABORTED;
}
else {
- h2_io *io = open_io(m, stream_id, req);
-
- if (!io->request->body) {
- status = h2_io_in_close(io);
+ h2_ihash_add(m->streams, stream);
+ if (stream->response) {
+ /* already have a response, schedule for submit */
+ h2_ihash_add(m->sready, stream);
+ }
+ else {
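+ /* no response yet: create the stream's input beam, queue the
+ * stream id and, if needed, register with the workers */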
+ h2_beam_create(&stream->input, stream->pool, stream->id,
+ "input", 0);
+ if (!m->need_registration) {
+ m->need_registration = h2_iq_empty(m->q);
+ }
+ if (m->workers_busy < m->workers_max) {
+ do_registration = m->need_registration;
+ }
+ h2_iq_add(m->q, stream->id, cmp, ctx);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): process, body=%d",
+ m->c->id, stream->id, stream->request->body);
}
-
- m->need_registration = m->need_registration || h2_iq_empty(m->q);
- do_registration = (m->need_registration && m->workers_busy < m->workers_max);
- h2_iq_add(m->q, io->id, cmp, ctx);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
- "h2_mplx(%ld-%d): process", m->c->id, stream_id);
- H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_process");
}
leave_mutex(m, acquired);
}
- if (status == APR_SUCCESS && do_registration) {
- workers_register(m);
+ if (do_registration) {
+ m->need_registration = 0;
+ h2_workers_register(m->workers, m);
}
return status;
}
@@ -1022,21 +859,16 @@ apr_status_t h2_mplx_process(h2_mplx *m, int stream_id,
static h2_task *pop_task(h2_mplx *m)
{
h2_task *task = NULL;
+ h2_stream *stream;
int sid;
- while (!m->aborted && !task
- && (m->workers_busy < m->workers_limit)
- && (sid = h2_iq_shift(m->q)) > 0) {
- h2_io *io = h2_io_set_get(m->stream_ios, sid);
- if (io && io->orphaned) {
- io_destroy(m, io, 0);
- if (m->join_wait) {
- apr_thread_cond_signal(m->join_wait);
- }
- }
- else if (io) {
+ while (!m->aborted && !task && (m->workers_busy < m->workers_limit)
+ && (sid = h2_iq_shift(m->q)) > 0) {
+
+ stream = h2_ihash_get(m->streams, sid);
+ if (stream) {
conn_rec *slave, **pslave;
int new_conn = 0;
-
+
pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
if (pslave) {
slave = *pslave;
@@ -1047,17 +879,29 @@ static h2_task *pop_task(h2_mplx *m)
}
slave->sbh = m->c->sbh;
- io->task = task = h2_task_create(m->id, io->request, slave, m);
+ slave->aborted = 0;
+ task = h2_task_create(slave, stream->request, stream->input, m);
+ h2_ihash_add(m->tasks, task);
+
m->c->keepalives++;
apr_table_setn(slave->notes, H2_TASK_ID_NOTE, task->id);
if (new_conn) {
h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
}
- io->worker_started = 1;
- io->started_at = apr_time_now();
+ stream->started = 1;
+ task->worker_started = 1;
+ task->started_at = apr_time_now();
if (sid > m->max_stream_started) {
m->max_stream_started = sid;
}
+
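+ /* configure the stream's input beam for the task: timeout and
+ * consumed/file callbacks, protected by the mplx mutex */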
+ if (stream->input) {
+ h2_beam_timeout_set(stream->input, m->stream_timeout);
+ h2_beam_on_consumed(stream->input, stream_input_consumed, m);
+ h2_beam_on_file_beam(stream->input, can_beam_file, m);
+ h2_beam_mutex_set(stream->input, beam_enter, task->cond, m);
+ }
+
++m->workers_busy;
}
}
@@ -1090,100 +934,119 @@ h2_task *h2_mplx_pop_task(h2_mplx *m, int *has_more)
static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
{
- if (task) {
- h2_io *io = h2_io_set_get(m->stream_ios, task->stream_id);
+ if (task->frozen) {
+ /* this task was handed over to an engine for processing
+ * and the original worker has finished. That means the
+ * engine may start processing now. */
+ h2_task_thaw(task);
+ /* we do not want the task to block on writing response
+ * bodies into the mplx. */
+ h2_task_set_io_blocking(task, 0);
+ apr_thread_cond_broadcast(m->task_thawed);
+ return;
+ }
+ else {
+ h2_stream *stream;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): task(%s) done", m->id, task->id);
+ out_close(m, task);
+ stream = h2_ihash_get(m->streams, task->stream_id);
- if (task->frozen) {
- /* this task was handed over to an engine for processing
- * and the original worker has finished. That means the
- * engine may start processing now. */
- h2_task_thaw(task);
- /* we do not want the task to block on writing response
- * bodies into the mplx. */
- /* FIXME: this implementation is incomplete. */
- h2_task_set_io_blocking(task, 0);
- apr_thread_cond_broadcast(m->task_thawed);
+ if (ngn) {
+ apr_off_t bytes = 0;
+ if (task->output.beam) {
+ h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
+ bytes += h2_beam_get_buffered(task->output.beam);
+ }
+ if (bytes > 0) {
+ /* we need to report consumed and current buffered output
+ * to the engine. The request will be streamed out or cancelled,
+ * no more data is coming from it and the engine should update
+ * its calculations before we destroy this information. */
+ h2_req_engine_out_consumed(ngn, task->c, bytes);
+ }
}
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): task(%s) done", m->id, task->id);
- /* clean our references and report request as done. Signal
- * that we want another unless we have been aborted */
- /* TODO: this will keep a worker attached to this h2_mplx as
- * long as it has requests to handle. Might no be fair to
- * other mplx's. Perhaps leave after n requests? */
- h2_mplx_out_close(m, task->stream_id);
-
- if (ngn && io) {
- apr_off_t bytes = io->output_consumed + h2_io_out_length(io);
- if (bytes > 0) {
- /* we need to report consumed and current buffered output
- * to the engine. The request will be streamed out or cancelled,
- * no more data is coming from it and the engine should update
- * its calculations before we destroy this information. */
- h2_req_engine_out_consumed(ngn, task->c, bytes);
- io->output_consumed = 0;
- }
+
+ if (task->engine) {
+ if (!h2_req_engine_is_shutdown(task->engine)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
+ "h2_mplx(%ld): task(%s) has not-shutdown "
+ "engine(%s)", m->id, task->id,
+ h2_req_engine_get_id(task->engine));
}
-
- if (task->engine) {
- if (!h2_req_engine_is_shutdown(task->engine)) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
- "h2_mplx(%ld): task(%s) has not-shutdown "
- "engine(%s)", m->id, task->id,
- h2_req_engine_get_id(task->engine));
- }
- h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
+ h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
+ }
+
+ if (!m->aborted && stream && m->redo_tasks
+ && h2_ihash_get(m->redo_tasks, task->stream_id)) {
+ /* reset and schedule again */
+ h2_task_redo(task);
+ h2_ihash_remove(m->redo_tasks, task->stream_id);
+ h2_iq_add(m->q, task->stream_id, NULL, NULL);
+ return;
+ }
+
+ task->worker_done = 1;
+ task->done_at = apr_time_now();
+ if (task->output.beam) {
+ h2_beam_on_consumed(task->output.beam, NULL, NULL);
+ h2_beam_mutex_set(task->output.beam, NULL, NULL, NULL);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): request done, %f ms elapsed", task->id,
+ (task->done_at - task->started_at) / 1000.0);
+ if (task->started_at > m->last_idle_block) {
+ /* this task finished without causing an 'idle block', e.g.
+ * a block by flow control.
+ */
+ if (task->done_at - m->last_limit_change >= m->limit_change_interval
+ && m->workers_limit < m->workers_max) {
+ /* Well behaving stream, allow it more workers */
+ m->workers_limit = H2MIN(m->workers_limit * 2,
+ m->workers_max);
+ m->last_limit_change = task->done_at;
+ m->need_registration = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): increase worker limit to %d",
+ m->id, m->workers_limit);
}
-
- if (io) {
- apr_time_t now = apr_time_now();
- if (!io->orphaned && m->redo_ios
- && h2_io_set_get(m->redo_ios, io->id)) {
- /* reset and schedule again */
- h2_io_redo(io);
- h2_io_set_remove(m->redo_ios, io);
- h2_iq_add(m->q, io->id, NULL, NULL);
- }
- else {
- io->worker_done = 1;
- io->done_at = now;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): request(%d) done, %f ms"
- " elapsed", m->id, io->id,
- (io->done_at - io->started_at) / 1000.0);
- if (io->started_at > m->last_idle_block) {
- /* this task finished without causing an 'idle block', e.g.
- * a block by flow control.
- */
- if (now - m->last_limit_change >= m->limit_change_interval
- && m->workers_limit < m->workers_max) {
- /* Well behaving stream, allow it more workers */
- m->workers_limit = H2MIN(m->workers_limit * 2,
- m->workers_max);
- m->last_limit_change = now;
- m->need_registration = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): increase worker limit to %d",
- m->id, m->workers_limit);
- }
- }
- }
-
- if (io->orphaned) {
- io_destroy(m, io, 0);
- if (m->join_wait) {
- apr_thread_cond_signal(m->join_wait);
- }
- }
- else {
- /* hang around until the stream deregisters */
- }
+ }
+
+ if (stream) {
+ /* hang around until the stream deregisters */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): task_done, stream still open",
+ task->id);
+ if (h2_stream_is_suspended(stream)) {
+ /* more data will not arrive, resume the stream */
+ h2_ihash_add(m->sresume, stream);
+ have_out_data_for(m, stream->id);
+ }
+ }
+ else {
+ /* stream no longer active, was it placed in hold? */
+ stream = h2_ihash_get(m->shold, task->stream_id);
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): task_done, stream in hold",
+ task->id);
+ /* We cannot destroy the stream here since this is
+ * called from a worker thread and freeing memory pools
+ * is only safe from the single thread that uses them (and
+ * their parent pool / allocator) */
+ h2_ihash_remove(m->shold, stream->id);
+ h2_ihash_add(m->spurge, stream);
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
- "h2_mplx(%ld): task %s without corresp. h2_io",
- m->id, task->id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): task_done, stream not found",
+ task->id);
+ task_destroy(m, task, 0);
+ }
+
+ if (m->join_wait) {
+ apr_thread_cond_signal(m->join_wait);
}
}
}
@@ -1208,80 +1071,76 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
* h2_mplx DoS protection
******************************************************************************/
-typedef struct {
- h2_mplx *m;
- h2_io *io;
- apr_time_t now;
-} io_iter_ctx;
-
-static int latest_repeatable_busy_unsubmitted_iter(void *data, h2_io *io)
+static int latest_repeatable_unsubmitted_iter(void *data, void *val)
{
- io_iter_ctx *ctx = data;
- if (io->worker_started && !io->worker_done
- && h2_io_is_repeatable(io)
- && !h2_io_set_get(ctx->m->redo_ios, io->id)) {
- /* this io occupies a worker, the response has not been submitted yet,
+ task_iter_ctx *ctx = data;
+ h2_task *task = val;
+ if (!task->worker_done && h2_task_can_redo(task)
+ && !h2_ihash_get(ctx->m->redo_tasks, task->stream_id)) {
+ /* this task occupies a worker, the response has not been submitted yet,
* not been cancelled and it is a repeatable request
* -> it can be re-scheduled later */
- if (!ctx->io || ctx->io->started_at < io->started_at) {
+ if (!ctx->task || ctx->task->started_at < task->started_at) {
/* we did not have one or this one was started later */
- ctx->io = io;
+ ctx->task = task;
}
}
return 1;
}
-static h2_io *get_latest_repeatable_busy_unsubmitted_io(h2_mplx *m)
+static h2_task *get_latest_repeatable_unsubmitted_task(h2_mplx *m)
{
- io_iter_ctx ctx;
+ task_iter_ctx ctx;
ctx.m = m;
- ctx.io = NULL;
- h2_io_set_iter(m->stream_ios, latest_repeatable_busy_unsubmitted_iter, &ctx);
- return ctx.io;
+ ctx.task = NULL;
+ h2_ihash_iter(m->tasks, latest_repeatable_unsubmitted_iter, &ctx);
+ return ctx.task;
}
-static int timed_out_busy_iter(void *data, h2_io *io)
+static int timed_out_busy_iter(void *data, void *val)
{
- io_iter_ctx *ctx = data;
- if (io->worker_started && !io->worker_done
- && (ctx->now - io->started_at) > ctx->m->stream_timeout) {
+ task_iter_ctx *ctx = data;
+ h2_task *task = val;
+ if (!task->worker_done
+ && (ctx->now - task->started_at) > ctx->m->stream_timeout) {
/* timed out stream occupying a worker, found */
- ctx->io = io;
+ ctx->task = task;
return 0;
}
return 1;
}
-static h2_io *get_timed_out_busy_stream(h2_mplx *m)
+
+static h2_task *get_timed_out_busy_task(h2_mplx *m)
{
- io_iter_ctx ctx;
+ task_iter_ctx ctx;
ctx.m = m;
- ctx.io = NULL;
+ ctx.task = NULL;
ctx.now = apr_time_now();
- h2_io_set_iter(m->stream_ios, timed_out_busy_iter, &ctx);
- return ctx.io;
+ h2_ihash_iter(m->tasks, timed_out_busy_iter, &ctx);
+ return ctx.task;
}
-static apr_status_t unschedule_slow_ios(h2_mplx *m)
+static apr_status_t unschedule_slow_tasks(h2_mplx *m)
{
- h2_io *io;
+ h2_task *task;
int n;
- if (!m->redo_ios) {
- m->redo_ios = h2_io_set_create(m->pool);
+ if (!m->redo_tasks) {
+ m->redo_tasks = h2_ihash_create(m->pool, offsetof(h2_task, stream_id));
}
/* Try to get rid of streams that occupy workers. Look for safe requests
* that are repeatable. If none found, fail the connection.
*/
- n = (m->workers_busy - m->workers_limit - h2_io_set_size(m->redo_ios));
- while (n > 0 && (io = get_latest_repeatable_busy_unsubmitted_io(m))) {
- h2_io_set_add(m->redo_ios, io);
- h2_io_rst(io, H2_ERR_CANCEL);
+ n = (m->workers_busy - m->workers_limit - h2_ihash_count(m->redo_tasks));
+ while (n > 0 && (task = get_latest_repeatable_unsubmitted_task(m))) {
+ h2_task_rst(task, H2_ERR_CANCEL);
+ h2_ihash_add(m->redo_tasks, task);
--n;
}
- if ((m->workers_busy - h2_io_set_size(m->redo_ios)) > m->workers_limit) {
- io = get_timed_out_busy_stream(m);
- if (io) {
+ if ((m->workers_busy - h2_ihash_count(m->redo_tasks)) > m->workers_limit) {
+ task = get_timed_out_busy_task(m);
+ if (task) {
/* Too many busy workers, unable to cancel enough streams
* and with a busy, timed out stream, we tell the client
* to go away... */
@@ -1298,7 +1157,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
int acquired;
if (enter_mutex(m, &acquired) == APR_SUCCESS) {
- apr_size_t scount = h2_io_set_size(m->stream_ios);
+ apr_size_t scount = h2_ihash_count(m->streams);
if (scount > 0 && m->workers_busy) {
/* If we have streams in connection state 'IDLE', meaning
* all streams are ready to sent data out, but lack
@@ -1335,7 +1194,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
}
if (m->workers_busy > m->workers_limit) {
- status = unschedule_slow_ios(m);
+ status = unschedule_slow_tasks(m);
}
}
leave_mutex(m, acquired);
@@ -1353,11 +1212,12 @@ typedef struct {
int streams_updated;
} ngn_update_ctx;
-static int ngn_update_window(void *ctx, h2_io *io)
+static int ngn_update_window(void *ctx, void *val)
{
ngn_update_ctx *uctx = ctx;
- if (io && io->task && io->task->assigned == uctx->ngn
- && io_out_consumed_signal(uctx->m, io)) {
+ h2_task *task = val;
+ if (task && task->assigned == uctx->ngn
+ && output_consumed_signal(uctx->m, task)) {
++uctx->streams_updated;
}
return 1;
@@ -1370,7 +1230,7 @@ static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn)
ctx.m = m;
ctx.ngn = ngn;
ctx.streams_updated = 0;
- h2_io_set_iter(m->stream_ios, ngn_update_window, &ctx);
+ h2_ihash_iter(m->tasks, ngn_update_window, &ctx);
return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN;
}
@@ -1392,12 +1252,13 @@ apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
task->r = r;
if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_io *io = h2_io_set_get(m->stream_ios, task->stream_id);
- if (!io || io->orphaned) {
- status = APR_ECONNABORTED;
+ h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
+
+ if (stream) {
+ status = h2_ngn_shed_push_task(m->ngn_shed, ngn_type, task, einit);
}
else {
- status = h2_ngn_shed_push_task(m->ngn_shed, ngn_type, task, einit);
+ status = APR_ECONNABORTED;
}
leave_mutex(m, acquired);
}
@@ -1469,4 +1330,129 @@ void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn)
}
}
}
-
+
+/*******************************************************************************
+ * mplx master events dispatching
+ ******************************************************************************/
+
+static int update_window(void *ctx, void *val)
+{
+ input_consumed_signal(ctx, val);
+ return 1;
+}
+
+apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
+ stream_ev_callback *on_resume,
+ stream_ev_callback *on_response,
+ void *on_ctx)
+{
+ apr_status_t status;
+ int acquired;
+ int streams[32];
+ h2_stream *stream;
+ h2_task *task;
+ size_t i, n;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld): dispatch events", m->id);
+
+ /* update input windows for streams */
+ h2_ihash_iter(m->streams, update_window, m);
+
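+ /* hand out responses for all streams that have become ready */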
+ if (on_response && !h2_ihash_empty(m->sready)) {
+ n = h2_ihash_ishift(m->sready, streams, H2_ALEN(streams));
+ for (i = 0; i < n; ++i) {
+ stream = h2_ihash_get(m->streams, streams[i]);
+ if (!stream) {
+ continue;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld-%d): on_response",
+ m->id, stream->id);
+ task = h2_ihash_get(m->tasks, stream->id);
+ if (task) {
+ task->submitted = 1;
+ if (task->rst_error) {
+ h2_stream_rst(stream, task->rst_error);
+ }
+ else {
+ AP_DEBUG_ASSERT(task->response);
+ h2_stream_set_response(stream, task->response, task->output.beam);
+ }
+ }
+ else {
+ /* We have the stream ready without a task. This happens
+ * when we fail streams early. A response should already
+ * be present. */
+ AP_DEBUG_ASSERT(stream->response || stream->rst_error);
+ }
+ status = on_response(on_ctx, stream->id);
+ }
+ }
+
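+ /* resume all suspended streams that are ready to continue */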
+ if (on_resume && !h2_ihash_empty(m->sresume)) {
+ n = h2_ihash_ishift(m->sresume, streams, H2_ALEN(streams));
+ for (i = 0; i < n; ++i) {
+ stream = h2_ihash_get(m->streams, streams[i]);
+ if (!stream) {
+ continue;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld-%d): on_resume",
+ m->id, stream->id);
+ h2_stream_set_suspended(stream, 0);
+ status = on_resume(on_ctx, stream->id);
+ }
+ }
+
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
+
+static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+{
+ h2_mplx *m = ctx;
+ apr_status_t status;
+ h2_stream *stream;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ stream = h2_ihash_get(m->streams, beam->id);
+ if (stream && h2_stream_is_suspended(stream)) {
+ h2_ihash_add(m->sresume, stream);
+ h2_beam_on_produced(beam, NULL, NULL);
+ have_out_data_for(m, beam->id);
+ }
+ leave_mutex(m, acquired);
+ }
+}
+
+apr_status_t h2_mplx_suspend_stream(h2_mplx *m, int stream_id)
+{
+ apr_status_t status;
+ h2_stream *stream;
+ h2_task *task;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ stream = h2_ihash_get(m->streams, stream_id);
+ if (stream) {
+ h2_stream_set_suspended(stream, 1);
+ task = h2_ihash_get(m->tasks, stream->id);
+ if (stream->started && (!task || task->worker_done)) {
+ h2_ihash_add(m->sresume, stream);
+ }
+ else {
+ /* register callback so that we can resume on new output */
+ h2_beam_on_produced(task->output.beam, output_produced, m);
+ }
+ }
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
index 40298476..821e6d65 100644
--- a/modules/http2/h2_mplx.h
+++ b/modules/http2/h2_mplx.h
@@ -37,21 +37,20 @@
struct apr_pool_t;
struct apr_thread_mutex_t;
struct apr_thread_cond_t;
+struct h2_bucket_beam;
struct h2_config;
struct h2_ihash_t;
struct h2_response;
struct h2_task;
struct h2_stream;
struct h2_request;
-struct h2_io_set;
struct apr_thread_cond_t;
struct h2_workers;
-struct h2_int_queue;
+struct h2_iqueue;
struct h2_ngn_shed;
struct h2_req_engine;
#include <apr_queue.h>
-#include "h2_io.h"
typedef struct h2_mplx h2_mplx;
@@ -72,10 +71,16 @@ struct h2_mplx {
unsigned int aborted : 1;
unsigned int need_registration : 1;
- struct h2_int_queue *q;
- struct h2_io_set *stream_ios;
- struct h2_io_set *ready_ios;
- struct h2_io_set *redo_ios;
+ struct h2_ihash_t *streams; /* all streams currently processing */
+ struct h2_ihash_t *shold; /* all streams done with task ongoing */
+ struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
+
+ struct h2_iqueue *q; /* all stream ids that need to be started */
+ struct h2_ihash_t *sready; /* all streams ready for response */
+ struct h2_ihash_t *sresume; /* all streams that can be resumed */
+
+ struct h2_ihash_t *tasks; /* all tasks started and not destroyed */
+ struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */
apr_uint32_t max_streams; /* max # of concurrent streams */
apr_uint32_t max_stream_started; /* highest stream id that started processing */
@@ -96,10 +101,11 @@ struct h2_mplx {
apr_size_t stream_max_mem;
apr_interval_time_t stream_timeout;
+ apr_pool_t *spare_io_pool;
apr_array_header_t *spare_slaves; /* spare slave connections */
struct h2_workers *workers;
- apr_size_t tx_handles_reserved;
+ int tx_handles_reserved;
apr_size_t tx_chunk_size;
h2_mplx_consumed_cb *input_consumed;
@@ -160,15 +166,11 @@ apr_uint32_t h2_mplx_shutdown(h2_mplx *m);
* Notifies mplx that a stream has finished processing.
*
* @param m the mplx itself
- * @param stream_id the id of the stream being done
+ * @param stream the stream being done
* @param rst_error if != 0, the stream was reset with the error given
*
*/
-apr_status_t h2_mplx_stream_done(h2_mplx *m, int stream_id, int rst_error);
-
-/* Return != 0 iff the multiplexer has output data for the given stream.
- */
-int h2_mplx_out_has_data_for(h2_mplx *m, int stream_id);
+apr_status_t h2_mplx_stream_done(h2_mplx *m, struct h2_stream *stream);
/**
* Waits on output data from any stream in this session to become available.
@@ -185,13 +187,12 @@ apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
* Process a stream request.
*
* @param m the multiplexer
- * @param stream_id the identifier of the stream
+ * @param stream the stream to process
* @param r the request to be processed
* @param cmp the stream priority compare function
* @param ctx context data for the compare function
*/
-apr_status_t h2_mplx_process(h2_mplx *m, int stream_id,
- const struct h2_request *r,
+apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
h2_stream_pri_cmp *cmp, void *ctx);
/**
@@ -214,96 +215,30 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
*/
void h2_mplx_set_consumed_cb(h2_mplx *m, h2_mplx_consumed_cb *cb, void *ctx);
-/*******************************************************************************
- * Input handling of streams.
- ******************************************************************************/
-/**
- * Reads a buckets for the given stream_id. Will return ARP_EAGAIN when
- * called with APR_NONBLOCK_READ and no data present. Will return APR_EOF
- * when the end of the stream input has been reached.
- * The condition passed in will be used for blocking/signalling and will
- * be protected by the mplx's own mutex.
- */
-apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block,
- int stream_id, apr_bucket_brigade *bb,
- apr_table_t *trailers,
- struct apr_thread_cond_t *iowait);
-
-/**
- * Appends data to the input of the given stream. Storage of input data is
- * not subject to flow control.
- */
-apr_status_t h2_mplx_in_write(h2_mplx *m, int stream_id,
- const char *data, apr_size_t len, int eos);
+typedef apr_status_t stream_ev_callback(void *ctx, int stream_id);
/**
- * Closes the input for the given stream_id.
+ * Dispatch events for the master connection, such as
+ * - resume: new output data has arrived for a suspended stream
+ * - response: the response for a stream is ready
*/
-apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id);
+apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
+ stream_ev_callback *on_resume,
+ stream_ev_callback *on_response,
+ void *ctx);
-/**
- * Invoke the consumed callback for all streams that had bytes read since the
- * last call to this function. If no stream had input data consumed, the
- * callback is not invoked.
- * The consumed callback may also be invoked at other times whenever
- * the need arises.
- * Returns APR_SUCCESS when an update happened, APR_EAGAIN if no update
- * happened.
- */
-apr_status_t h2_mplx_in_update_windows(h2_mplx *m);
+apr_status_t h2_mplx_suspend_stream(h2_mplx *m, int stream_id);
/*******************************************************************************
* Output handling of streams.
******************************************************************************/
/**
- * Get a stream whose response is ready for submit. Will set response and
- * any out data available in stream.
- * @param m the mplxer to get a response from
- * @param bb the brigade to place any existing repsonse body data into
- */
-struct h2_stream *h2_mplx_next_submit(h2_mplx *m,
- struct h2_ihash_t *streams);
-
-/**
- * Reads output data into the given brigade. Will never block, but
- * return APR_EAGAIN until data arrives or the stream is closed.
- */
-apr_status_t h2_mplx_out_get_brigade(h2_mplx *mplx, int stream_id,
- apr_bucket_brigade *bb,
- apr_off_t len, apr_table_t **ptrailers);
-
-/**
* Opens the output for the given stream with the specified response.
*/
apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id,
- struct h2_response *response,
- ap_filter_t* filter, apr_bucket_brigade *bb,
- struct apr_thread_cond_t *iowait);
-
-/**
- * Append the brigade to the stream output. Might block if amount
- * of bytes buffered reaches configured max.
- * @param stream_id the stream identifier
- * @param filter the apache filter context of the data
- * @param blocking == 0 iff call should return with APR_INCOMPLETE if
- * the full brigade cannot be written at once
- * @param bb the bucket brigade to append
- * @param iowait a conditional used for block/signalling in h2_mplx
- */
-apr_status_t h2_mplx_out_write(h2_mplx *mplx, int stream_id,
- ap_filter_t* filter,
- int blocking,
- apr_bucket_brigade *bb,
- struct apr_thread_cond_t *iowait);
-
-/**
- * Closes the output for stream stream_id.
- */
-apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id);
-
-apr_status_t h2_mplx_out_rst(h2_mplx *m, int stream_id, int error);
+ struct h2_response *response);
/*******************************************************************************
* h2_mplx list Manipulation.
diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c
index 32483d93..f0676421 100644
--- a/modules/http2/h2_ngn_shed.c
+++ b/modules/http2/h2_ngn_shed.c
@@ -29,16 +29,15 @@
#include "mod_http2.h"
#include "h2_private.h"
+#include "h2.h"
#include "h2_config.h"
#include "h2_conn.h"
#include "h2_ctx.h"
#include "h2_h2.h"
-#include "h2_int_queue.h"
#include "h2_mplx.h"
#include "h2_response.h"
#include "h2_request.h"
#include "h2_task.h"
-#include "h2_task_output.h"
#include "h2_util.h"
#include "h2_ngn_shed.h"
@@ -140,6 +139,8 @@ h2_ngn_shed *h2_ngn_shed_get_shed(h2_req_engine *ngn)
void h2_ngn_shed_abort(h2_ngn_shed *shed)
{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03394)
+ "h2_ngn_shed(%ld): abort", shed->c->id);
shed->aborted = 1;
}
@@ -203,7 +204,7 @@ apr_status_t h2_ngn_shed_push_task(h2_ngn_shed *shed, const char *ngn_type,
status = einit(newngn, newngn->id, newngn->type, newngn->pool,
shed->req_buffer_size, task->r,
&newngn->out_consumed, &newngn->out_consumed_ctx);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03395)
"h2_ngn_shed(%ld): create engine %s (%s)",
shed->c->id, newngn->id, newngn->type);
if (status == APR_SUCCESS) {
@@ -246,11 +247,11 @@ apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed,
AP_DEBUG_ASSERT(ngn);
*ptask = NULL;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03396)
"h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d",
shed->c->id, ngn->id, want_shutdown);
if (shed->aborted) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, shed->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03397)
"h2_ngn_shed(%ld): abort while pulling requests %s",
shed->c->id, ngn->id);
ngn->shutdown = 1;
@@ -269,18 +270,30 @@ apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed,
}
if ((entry = pop_detached(ngn))) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c, APLOGNO(03398)
"h2_ngn_shed(%ld): pulled request %s for engine %s",
shed->c->id, entry->task->id, ngn->id);
ngn->no_live++;
*ptask = entry->task;
entry->task->assigned = ngn;
+ /* task will now run in ngn's own thread. Modules like lua
+ * seem to require the correct thread set in the conn_rec.
+ * See PR 59542. */
+ if (entry->task->c && ngn->c) {
+ entry->task->c->current_thread = ngn->c->current_thread;
+ }
+ if (entry->task->engine == ngn) {
+ /* If an engine pushes its own base task, and then pulls
+ * it back to itself again, it needs to be thawed.
+ */
+ h2_task_thaw(entry->task);
+ }
return APR_SUCCESS;
}
if (1) {
h2_ngn_entry *entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03399)
"h2_ngn_shed(%ld): pull task, nothing, first task %s",
shed->c->id, entry->task->id);
}
@@ -290,13 +303,14 @@ apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed,
static apr_status_t ngn_done_task(h2_ngn_shed *shed, h2_req_engine *ngn,
h2_task *task, int waslive, int aborted)
{
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03400)
"h2_ngn_shed(%ld): task %s %s by %s",
shed->c->id, task->id, aborted? "aborted":"done", ngn->id);
ngn->no_finished++;
if (waslive) ngn->no_live--;
ngn->no_assigned--;
-
+ task->assigned = NULL;
+
return APR_SUCCESS;
}
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
new file mode 100644
index 00000000..79a2e82e
--- /dev/null
+++ b/modules/http2/h2_proxy_session.c
@@ -0,0 +1,1368 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+#include <apr_strings.h>
+#include <nghttp2/nghttp2.h>
+
+#include <mpm_common.h>
+#include <httpd.h>
+#include <mod_proxy.h>
+
+#include "mod_http2.h"
+#include "h2.h"
+#include "h2_proxy_util.h"
+#include "h2_proxy_session.h"
+
+APLOG_USE_MODULE(proxy_http2);
+
+typedef struct h2_proxy_stream {
+ int id;
+ apr_pool_t *pool;
+ h2_proxy_session *session;
+
+ const char *url;
+ request_rec *r;
+ h2_request *req;
+ int standalone;
+
+ h2_stream_state_t state;
+ unsigned int suspended : 1;
+ unsigned int data_sent : 1;
+ unsigned int data_received : 1;
+ uint32_t error_code;
+
+ apr_bucket_brigade *input;
+ apr_bucket_brigade *output;
+
+ apr_table_t *saves;
+} h2_proxy_stream;
+
+
+static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
+ int arg, const char *msg);
+
+
+static apr_status_t proxy_session_pre_close(void *theconn)
+{
+ proxy_conn_rec *p_conn = (proxy_conn_rec *)theconn;
+ h2_proxy_session *session = p_conn->data;
+
+ if (session && session->ngh2) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "proxy_session(%s): pool cleanup, state=%d, streams=%d",
+ session->id, session->state,
+ (int)h2_ihash_count(session->streams));
+ session->aborted = 1;
+ dispatch_event(session, H2_PROXYS_EV_PRE_CLOSE, 0, NULL);
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ p_conn->data = NULL;
+ }
+ return APR_SUCCESS;
+}
+
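+ /* pass the brigade down the origin connection's output filters,
+ * optionally appending a FLUSH bucket first */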
+static int proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+ proxy_conn_rec *p_conn,
+ conn_rec *origin, apr_bucket_brigade *bb,
+ int flush)
+{
+ apr_status_t status;
+ apr_off_t transferred;
+
+ if (flush) {
+ apr_bucket *e = apr_bucket_flush_create(bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+ apr_brigade_length(bb, 0, &transferred);
+ if (transferred != -1)
+ p_conn->worker->s->transferred += transferred;
+ status = ap_pass_brigade(origin->output_filters, bb);
+ /* Cleanup the brigade now to avoid buckets lifetime
+ * issues in case of error returned below. */
+ apr_brigade_cleanup(bb);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, origin, APLOGNO(03357)
+ "pass output failed to %pI (%s)",
+ p_conn->addr, p_conn->hostname);
+ }
+ return status;
+}
+
+static ssize_t raw_send(nghttp2_session *ngh2, const uint8_t *data,
+ size_t length, int flags, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ apr_bucket *b;
+ apr_status_t status;
+ int flush = 1;
+
+ if (data) {
+ b = apr_bucket_transient_create((const char*)data, length,
+ session->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(session->output, b);
+ }
+
+ status = proxy_pass_brigade(session->c->bucket_alloc,
+ session->p_conn, session->c,
+ session->output, flush);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_proxy_sesssion(%s): raw_send %d bytes, flush=%d",
+ session->id, (int)length, flush);
+ if (status != APR_SUCCESS) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ return length;
+}
+
+static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ int n;
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03341)
+ "h2_proxy_session(%s): recv FRAME[%s]",
+ session->id, buffer);
+ }
+
+ switch (frame->hd.type) {
+ case NGHTTP2_HEADERS:
+ break;
+ case NGHTTP2_PUSH_PROMISE:
+ break;
+ case NGHTTP2_SETTINGS:
+ if (frame->settings.niv > 0) {
+ n = nghttp2_session_get_remote_settings(ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
+ if (n > 0) {
+ session->remote_max_concurrent = n;
+ }
+ }
+ break;
+ case NGHTTP2_GOAWAY:
+ /* we expect the remote server to tell us the highest stream id
+ * that it has started processing. */
+ session->last_stream_id = frame->goaway.last_stream_id;
+ dispatch_event(session, H2_PROXYS_EV_REMOTE_GOAWAY, 0, NULL);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int before_frame_send(nghttp2_session *ngh2,
+ const nghttp2_frame *frame, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03343)
+ "h2_proxy_session(%s): sent FRAME[%s]",
+ session->id, buffer);
+ }
+ return 0;
+}
+
+static int add_header(void *table, const char *n, const char *v)
+{
+ apr_table_addn(table, n, v);
+ return 1;
+}
+
+static void process_proxy_header(request_rec *r, const char *n, const char *v)
+{
+ static const struct {
+ const char *name;
+ ap_proxy_header_reverse_map_fn func;
+ } transform_hdrs[] = {
+ { "Location", ap_proxy_location_reverse_map },
+ { "Content-Location", ap_proxy_location_reverse_map },
+ { "URI", ap_proxy_location_reverse_map },
+ { "Destination", ap_proxy_location_reverse_map },
+ { "Set-Cookie", ap_proxy_cookie_reverse_map },
+ { NULL, NULL }
+ };
+ proxy_dir_conf *dconf;
+ int i;
+
+ for (i = 0; transform_hdrs[i].name; ++i) {
+ if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) {
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ apr_table_add(r->headers_out, n,
+ (*transform_hdrs[i].func)(r, dconf, v));
+ return;
+ }
+ }
+ apr_table_add(r->headers_out, n, v);
+}
+
+static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
+ const char *n, apr_size_t nlen,
+ const char *v, apr_size_t vlen)
+{
+ if (n[0] == ':') {
+ if (!stream->data_received && !strncmp(":status", n, nlen)) {
+ char *s = apr_pstrndup(stream->r->pool, v, vlen);
+
+ apr_table_setn(stream->r->notes, "proxy-status", s);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got status %s",
+ stream->session->id, stream->id, s);
+ stream->r->status = (int)apr_atoi64(s);
+ if (stream->r->status <= 0) {
+ stream->r->status = 500;
+ return APR_EGENERAL;
+ }
+ }
+ return APR_SUCCESS;
+ }
+
+ if (!h2_proxy_res_ignore_header(n, nlen)) {
+ char *hname, *hvalue;
+
+ hname = apr_pstrndup(stream->pool, n, nlen);
+ h2_util_camel_case_header(hname, nlen);
+ hvalue = apr_pstrndup(stream->pool, v, vlen);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got header %s: %s",
+ stream->session->id, stream->id, hname, hvalue);
+ process_proxy_header(stream->r, hname, hvalue);
+ }
+ return APR_SUCCESS;
+}
+
+static int log_header(void *ctx, const char *key, const char *value)
+{
+ h2_proxy_stream *stream = ctx;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_stream(%s-%d), header_out %s: %s",
+ stream->session->id, stream->id, key, value);
+ return 1;
+}
+
+static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream)
+{
+ h2_proxy_session *session = stream->session;
+ request_rec *r = stream->r;
+ apr_pool_t *p = r->pool;
+
+ /* Now, add in the cookies from the response to the ones already saved */
+ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
+
+ /* and now load 'em all in */
+ if (!apr_is_empty_table(stream->saves)) {
+ apr_table_unset(r->headers_out, "Set-Cookie");
+ r->headers_out = apr_table_overlay(p, r->headers_out, stream->saves);
+ }
+
+ /* handle Via header in response */
+ if (session->conf->viaopt != via_off
+ && session->conf->viaopt != via_block) {
+ const char *server_name = ap_get_server_name(stream->r);
+ apr_port_t port = ap_get_server_port(stream->r);
+ char portstr[32];
+
+ /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
+ * then the server name returned by ap_get_server_name() is the
+ * origin server name (which does not make much sense in Via: headers)
+ * so we use the proxy vhost's name instead.
+ */
+ if (server_name == stream->r->hostname) {
+ server_name = stream->r->server->server_hostname;
+ }
+ if (ap_is_default_port(port, stream->r)) {
+ portstr[0] = '\0';
+ }
+ else {
+ apr_snprintf(portstr, sizeof(portstr), ":%d", port);
+ }
+
+ /* create a "Via:" response header entry and merge it */
+ apr_table_addn(r->headers_out, "Via",
+ (session->conf->viaopt == via_full)
+ ? apr_psprintf(p, "%d.%d %s%s (%s)",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, portstr,
+ AP_SERVER_BASEVERSION)
+ : apr_psprintf(p, "%d.%d %s%s",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, portstr)
+ );
+ }
+
+ if (APLOGrtrace2(stream->r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_stream(%s-%d), header_out after merging",
+ stream->session->id, stream->id);
+ apr_table_do(log_header, stream, stream->r->headers_out, NULL);
+ }
+}
+
+static int on_data_chunk_recv(nghttp2_session *ngh2, uint8_t flags,
+ int32_t stream_id, const uint8_t *data,
+ size_t len, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ apr_bucket *b;
+ apr_status_t status;
+
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03358)
+ "h2_proxy_session(%s): recv data chunk for "
+ "unknown stream %d, ignored",
+ session->id, stream_id);
+ return 0;
+ }
+
+ if (!stream->data_received) {
+ /* last chance to manipulate response headers.
+ * after this, only trailers */
+ h2_proxy_stream_end_headers_out(stream);
+ stream->data_received = 1;
+ }
+
+ b = apr_bucket_transient_create((const char*)data, len,
+ stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ /* always flush after a DATA frame, as we have no other indication
+ * of buffer use */
+ b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, stream->r, APLOGNO(03359)
+ "h2_proxy_session(%s): pass response data for "
+ "stream %d, %d bytes", session->id, stream_id, (int)len);
+ status = ap_pass_brigade(stream->r->output_filters, stream->output);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344)
+ "h2_proxy_session(%s): passing output on stream %d",
+ session->id, stream->id);
+ nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+ if (stream->standalone) {
+ nghttp2_session_consume(ngh2, stream_id, len);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_session(%s): stream %d, win_update %d bytes",
+ session->id, stream_id, (int)len);
+ }
+ return 0;
+}
+
+static int on_stream_close(nghttp2_session *ngh2, int32_t stream_id,
+ uint32_t error_code, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ if (!session->aborted) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03360)
+ "h2_proxy_session(%s): stream=%d, closed, err=%d",
+ session->id, stream_id, error_code);
+ stream = h2_ihash_get(session->streams, stream_id);
+ if (stream) {
+ stream->error_code = error_code;
+ }
+ dispatch_event(session, H2_PROXYS_EV_STREAM_DONE, stream_id, NULL);
+ }
+ return 0;
+}
+
+static int on_header(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ const uint8_t *namearg, size_t nlen,
+ const uint8_t *valuearg, size_t vlen, uint8_t flags,
+ void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ const char *n = (const char*)namearg;
+ const char *v = (const char*)valuearg;
+
+ (void)session;
+ if (frame->hd.type == NGHTTP2_HEADERS && nlen) {
+ stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
+ if (stream) {
+ if (h2_proxy_stream_add_header_out(stream, n, nlen, v, vlen)) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+ }
+ else if (frame->hd.type == NGHTTP2_PUSH_PROMISE) {
+ }
+
+ return 0;
+}
+
+static ssize_t stream_data_read(nghttp2_session *ngh2, int32_t stream_id,
+ uint8_t *buf, size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source, void *user_data)
+{
+ h2_proxy_stream *stream;
+ apr_status_t status = APR_SUCCESS;
+
+ *data_flags = 0;
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03361)
+ "h2_proxy_stream: data_read, stream %d not found",
+ stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (APR_BRIGADE_EMPTY(stream->input)) {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ H2MAX(APR_BUCKET_BUFF_SIZE, length));
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%s-%d): request body read",
+ stream->session->id, stream->id);
+ }
+
+ if (status == APR_SUCCESS) {
+ ssize_t readlen = 0;
+ while (status == APR_SUCCESS
+ && (readlen < length)
+ && !APR_BRIGADE_EMPTY(stream->input)) {
+ apr_bucket* b = APR_BRIGADE_FIRST(stream->input);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ *data_flags |= NGHTTP2_DATA_FLAG_EOF;
+ }
+ else {
+ /* we do nothing more regarding any meta here */
+ }
+ }
+ else {
+ const char *bdata = NULL;
+ apr_size_t blen = 0;
+ status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
+
+ if (status == APR_SUCCESS && blen > 0) {
+ ssize_t copylen = H2MIN(length - readlen, blen);
+ memcpy(buf, bdata, copylen);
+ buf += copylen;
+ readlen += copylen;
+ if (copylen < blen) {
+ /* We have data left in the bucket. Split it. */
+ status = apr_bucket_split(b, copylen);
+ }
+ }
+ }
+ apr_bucket_delete(b);
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%d): request body read %ld bytes, flags=%d",
+ stream->id, (long)readlen, (int)*data_flags);
+ stream->data_sent = 1;
+ return readlen;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status)) {
+ /* suspended stream, needs to be re-awakened */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%s-%d): suspending",
+ stream->session->id, stream_id);
+ stream->suspended = 1;
+ h2_iq_add(stream->session->suspended, stream->id, NULL, NULL);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ else {
+ nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+}
+
+h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+ proxy_server_conf *conf,
+ unsigned char window_bits_connection,
+ unsigned char window_bits_stream,
+ h2_proxy_request_done *done)
+{
+ if (!p_conn->data) {
+ apr_pool_t *pool = p_conn->scpool;
+ h2_proxy_session *session;
+ nghttp2_session_callbacks *cbs;
+ nghttp2_option *option;
+
+ session = apr_pcalloc(pool, sizeof(*session));
+ apr_pool_pre_cleanup_register(pool, p_conn, proxy_session_pre_close);
+ p_conn->data = session;
+
+ session->id = apr_pstrdup(p_conn->scpool, id);
+ session->c = p_conn->connection;
+ session->p_conn = p_conn;
+ session->conf = conf;
+ session->pool = p_conn->scpool;
+ session->state = H2_PROXYS_ST_INIT;
+ session->window_bits_stream = window_bits_stream;
+ session->window_bits_connection = window_bits_connection;
+ session->streams = h2_ihash_create(pool, offsetof(h2_proxy_stream, id));
+ session->suspended = h2_iq_create(pool, 5);
+ session->done = done;
+
+ session->input = apr_brigade_create(session->pool, session->c->bucket_alloc);
+ session->output = apr_brigade_create(session->pool, session->c->bucket_alloc);
+
+ nghttp2_session_callbacks_new(&cbs);
+ nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv);
+ nghttp2_session_callbacks_set_on_data_chunk_recv_callback(cbs, on_data_chunk_recv);
+ nghttp2_session_callbacks_set_on_stream_close_callback(cbs, on_stream_close);
+ nghttp2_session_callbacks_set_on_header_callback(cbs, on_header);
+ nghttp2_session_callbacks_set_before_frame_send_callback(cbs, before_frame_send);
+ nghttp2_session_callbacks_set_send_callback(cbs, raw_send);
+
+ nghttp2_option_new(&option);
+ nghttp2_option_set_peer_max_concurrent_streams(option, 100);
+ nghttp2_option_set_no_auto_window_update(option, 1);
+
+ nghttp2_session_client_new2(&session->ngh2, cbs, session, option);
+
+ nghttp2_option_del(option);
+ nghttp2_session_callbacks_del(cbs);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362)
+ "setup session for %s", p_conn->hostname);
+ }
+ return p_conn->data;
+}
+
+static apr_status_t session_start(h2_proxy_session *session)
+{
+ nghttp2_settings_entry settings[2];
+ int rv, add_conn_window;
+ apr_socket_t *s;
+
+ s = ap_get_conn_socket(session->c);
+#if (!defined(WIN32) && !defined(NETWARE)) || defined(DOXYGEN)
+ if (s) {
+ ap_sock_disable_nagle(s);
+ }
+#endif
+
+ settings[0].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH;
+ settings[0].value = 0;
+ settings[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings[1].value = (1 << session->window_bits_stream) - 1;
+
+ rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE, settings,
+ H2_ALEN(settings));
+
+ /* If the connection window is larger than our default, trigger a WINDOW_UPDATE */
+ add_conn_window = ((1 << session->window_bits_connection) - 1 -
+ NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE);
+ if (!rv && add_conn_window != 0) {
+ rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE, 0, add_conn_window);
+ }
+ return rv? APR_EGENERAL : APR_SUCCESS;
+}
+
+static apr_status_t open_stream(h2_proxy_session *session, const char *url,
+ request_rec *r, int standalone,
+ h2_proxy_stream **pstream)
+{
+ h2_proxy_stream *stream;
+ apr_uri_t puri;
+ const char *authority, *scheme, *path;
+ apr_status_t status;
+
+ stream = apr_pcalloc(r->pool, sizeof(*stream));
+
+ stream->pool = r->pool;
+ stream->url = url;
+ stream->r = r;
+ stream->standalone = standalone;
+ stream->session = session;
+ stream->state = H2_STREAM_ST_IDLE;
+
+ stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+ stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+
+ stream->req = h2_req_create(1, stream->pool, 0);
+
+ status = apr_uri_parse(stream->pool, url, &puri);
+ if (status != APR_SUCCESS)
+ return status;
+
+ scheme = (strcmp(puri.scheme, "h2")? "http" : "https");
+ authority = puri.hostname;
+ if (!ap_strchr_c(authority, ':') && puri.port
+ && apr_uri_port_of_scheme(scheme) != puri.port) {
+ /* port info missing and port is not default for scheme: append */
+ authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
+ }
+ path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
+ h2_req_make(stream->req, stream->pool, r->method, scheme,
+ authority, path, r->headers_in);
+
+ /* Tuck away all already existing cookies */
+ stream->saves = apr_table_make(r->pool, 2);
+ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
+
+ *pstream = stream;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *stream)
+{
+ h2_ngheader *hd;
+ nghttp2_data_provider *pp = NULL;
+ nghttp2_data_provider provider;
+ int rv;
+ apr_status_t status;
+
+ hd = h2_util_ngheader_make_req(stream->pool, stream->req);
+
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ if ((status == APR_SUCCESS && !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(stream->input)))
+ || APR_STATUS_IS_EAGAIN(status)) {
+ /* there might be data coming */
+ provider.source.fd = 0;
+ provider.source.ptr = NULL;
+ provider.read_callback = stream_data_read;
+ pp = &provider;
+ }
+
+ rv = nghttp2_submit_request(session->ngh2, NULL,
+ hd->nv, hd->nvlen, pp, stream);
+
+ if (APLOGcdebug(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363)
+ "h2_proxy_session(%s): submit %s%s -> %d",
+ session->id, stream->req->authority, stream->req->path,
+ rv);
+ }
+
+ if (rv > 0) {
+ stream->id = rv;
+ stream->state = H2_STREAM_ST_OPEN;
+ h2_ihash_add(session->streams, stream);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_SUBMITTED, rv, NULL);
+
+ return APR_SUCCESS;
+ }
+ return APR_EGENERAL;
+}
+
+static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *bb)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t readlen = 0;
+ ssize_t n;
+
+ while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket* b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* nop */
+ }
+ else {
+ const char *bdata = NULL;
+ apr_size_t blen = 0;
+
+ status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
+ if (status == APR_SUCCESS && blen > 0) {
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)bdata, blen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): feeding %ld bytes -> %ld",
+ session->id, (long)blen, (long)n);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ status = APR_EGENERAL;
+ }
+ }
+ else {
+ readlen += n;
+ if (n < blen) {
+ apr_bucket_split(b, n);
+ }
+ }
+ }
+ }
+ apr_bucket_delete(b);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_proxy_session(%s): fed %ld bytes of input to session",
+ session->id, (long)readlen);
+ if (readlen == 0 && status == APR_SUCCESS) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+static apr_status_t h2_proxy_session_read(h2_proxy_session *session, int block,
+ apr_interval_time_t timeout)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (APR_BRIGADE_EMPTY(session->input)) {
+ apr_socket_t *socket = NULL;
+ apr_time_t save_timeout = -1;
+
+ if (block) {
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_get(socket, &save_timeout);
+ apr_socket_timeout_set(socket, timeout);
+ }
+ else {
+ /* cannot block on timeout */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03379)
+ "h2_proxy_session(%s): unable to get conn socket",
+ session->id);
+ return APR_ENOTIMPL;
+ }
+ }
+
+ status = ap_get_brigade(session->c->input_filters, session->input,
+ AP_MODE_READBYTES,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ,
+ 64 * 1024);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ "h2_proxy_session(%s): read from conn", session->id);
+ if (socket && save_timeout != -1) {
+ apr_socket_timeout_set(socket, save_timeout);
+ }
+ }
+
+ if (status == APR_SUCCESS) {
+ status = feed_brigade(session, session->input);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ /* nop */
+ }
+ else if (!APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03380)
+ "h2_proxy_session(%s): read error", session->id);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ }
+
+ return status;
+}
+
+apr_status_t h2_proxy_session_submit(h2_proxy_session *session,
+ const char *url, request_rec *r,
+ int standalone)
+{
+ h2_proxy_stream *stream;
+ apr_status_t status;
+
+ status = open_stream(session, url, r, standalone, &stream);
+ if (status == APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03381)
+ "process stream(%d): %s %s%s, original: %s",
+ stream->id, stream->req->method,
+ stream->req->authority, stream->req->path,
+ r->the_request);
+ status = submit_stream(session, stream);
+ }
+ return status;
+}
+
+static apr_status_t check_suspended(h2_proxy_session *session)
+{
+ h2_proxy_stream *stream;
+ int i, stream_id;
+ apr_status_t status;
+
+ for (i = 0; i < session->suspended->nelts; ++i) {
+ stream_id = session->suspended->elts[i];
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (stream) {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(stream->input)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ "h2_proxy_stream(%s-%d): resuming",
+ session->id, stream_id);
+ stream->suspended = 0;
+ h2_iq_remove(session->suspended, stream_id);
+ nghttp2_session_resume_data(session->ngh2, stream_id);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c,
+ APLOGNO(03382) "h2_proxy_stream(%s-%d): check input",
+ session->id, stream_id);
+ h2_iq_remove(session->suspended, stream_id);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ }
+ else {
+ /* gone? */
+ h2_iq_remove(session->suspended, stream_id);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ }
+ return APR_EAGAIN;
+}
+
+static apr_status_t session_shutdown(h2_proxy_session *session, int reason,
+ const char *msg)
+{
+ apr_status_t status = APR_SUCCESS;
+ const char *err = msg;
+
+ AP_DEBUG_ASSERT(session);
+ if (!err && reason) {
+ err = nghttp2_strerror(reason);
+ }
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0,
+ reason, (uint8_t*)err, err? strlen(err):0);
+ status = nghttp2_session_send(session->ngh2);
+ dispatch_event(session, H2_PROXYS_EV_LOCAL_GOAWAY, reason, err);
+ return status;
+}
+
+
+static const char *StateNames[] = {
+ "INIT", /* H2_PROXYS_ST_INIT */
+ "DONE", /* H2_PROXYS_ST_DONE */
+ "IDLE", /* H2_PROXYS_ST_IDLE */
+ "BUSY", /* H2_PROXYS_ST_BUSY */
+ "WAIT", /* H2_PROXYS_ST_WAIT */
+ "LSHUTDOWN", /* H2_PROXYS_ST_LOCAL_SHUTDOWN */
+ "RSHUTDOWN", /* H2_PROXYS_ST_REMOTE_SHUTDOWN */
+};
+
+static const char *state_name(h2_proxys_state state)
+{
+ if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
+ return "unknown";
+ }
+ return StateNames[state];
+}
+
+static int is_accepting_streams(h2_proxy_session *session)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_WAIT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static void transit(h2_proxy_session *session, const char *action,
+ h2_proxys_state nstate)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03345)
+ "h2_proxy_session(%s): transit [%s] -- %s --> [%s]", session->id,
+ state_name(session->state), action, state_name(nstate));
+ session->state = nstate;
+}
+
+static void ev_init(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ if (h2_ihash_empty(session->streams)) {
+ transit(session, "init", H2_PROXYS_ST_IDLE);
+ }
+ else {
+ transit(session, "init", H2_PROXYS_ST_BUSY);
+ }
+ break;
+
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_local_goaway(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* already did that? */
+ break;
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* all done */
+ transit(session, "local goaway", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ transit(session, "local goaway", H2_PROXYS_ST_LOCAL_SHUTDOWN);
+ break;
+ }
+}
+
+static void ev_remote_goaway(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* already received that? */
+ break;
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* all done */
+ transit(session, "remote goaway", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ transit(session, "remote goaway", H2_PROXYS_ST_REMOTE_SHUTDOWN);
+ break;
+ }
+}
+
+static void ev_conn_error(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "conn error", H2_PROXYS_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, arg, session->c,
+ "h2_proxy_session(%s): conn error -> shutdown", session->id);
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void ev_proto_error(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "proto error", H2_PROXYS_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): proto error -> shutdown", session->id);
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void ev_conn_timeout(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ transit(session, "conn timeout", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ session_shutdown(session, arg, msg);
+ transit(session, "conn timeout", H2_PROXYS_ST_DONE);
+ break;
+ }
+}
+
+static void ev_no_io(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* nothing for input and output to do. If we remain
+ * in this state, we go into a tight loop and suck up
+ * CPU cycles. Ideally, we'd like to do a blocking read, but that
+ * is not possible if we have scheduled tasks and wait
+ * for them to produce something. */
+ if (h2_ihash_empty(session->streams)) {
+ if (!is_accepting_streams(session)) {
+ /* We are no longer accepting new streams and have
+ * finished processing existing ones. Time to leave. */
+ session_shutdown(session, arg, msg);
+ transit(session, "no io", H2_PROXYS_ST_DONE);
+ }
+ else {
+ /* When we have no streams, no task events are possible,
+ * switch to blocking reads */
+ transit(session, "no io", H2_PROXYS_ST_IDLE);
+ }
+ }
+ else {
+ /* Unable to do blocking reads, as we wait on events from
+ * task processing in other threads. Do a busy wait with
+ * backoff timer. */
+ transit(session, "no io", H2_PROXYS_ST_WAIT);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_submitted(h2_proxy_session *session, int stream_id,
+ const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "stream submitted", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_done(h2_proxy_session *session, int stream_id,
+ const char *msg)
+{
+ h2_proxy_stream *stream;
+
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (stream) {
+ int touched = (stream->data_sent ||
+ stream_id <= session->last_stream_id);
+ int complete = (stream->error_code == 0);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
+ "h2_proxy_session(%s): stream(%d) closed "
+ "(complete=%d, touched=%d)",
+ session->id, stream_id, complete, touched);
+
+ if (complete && !stream->data_received) {
+ apr_bucket *b;
+ /* if the response had no body, this is the time to flush
+ * an empty brigade which will also "write" the response
+ * headers */
+ h2_proxy_stream_end_headers_out(stream);
+ stream->data_received = 1;
+ b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ ap_pass_brigade(stream->r->output_filters, stream->output);
+ }
+
+ stream->state = H2_STREAM_ST_CLOSED;
+ h2_ihash_remove(session->streams, stream_id);
+ h2_iq_remove(session->suspended, stream_id);
+ if (session->done) {
+ session->done(session, stream->r, complete, touched);
+ }
+ }
+
+ switch (session->state) {
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_resumed(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "stream resumed", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_data_read(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "data read", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_ngh2_done(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ /* nop */
+ break;
+ default:
+ transit(session, "nghttp2 done", H2_PROXYS_ST_DONE);
+ break;
+ }
+}
+
+static void ev_pre_close(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* nop */
+ break;
+ default:
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
+ int arg, const char *msg)
+{
+ switch (ev) {
+ case H2_PROXYS_EV_INIT:
+ ev_init(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_LOCAL_GOAWAY:
+ ev_local_goaway(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_REMOTE_GOAWAY:
+ ev_remote_goaway(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_CONN_ERROR:
+ ev_conn_error(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_PROTO_ERROR:
+ ev_proto_error(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_CONN_TIMEOUT:
+ ev_conn_timeout(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_NO_IO:
+ ev_no_io(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_SUBMITTED:
+ ev_stream_submitted(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_DONE:
+ ev_stream_done(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_RESUMED:
+ ev_stream_resumed(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_DATA_READ:
+ ev_data_read(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_NGH2_DONE:
+ ev_ngh2_done(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_PRE_CLOSE:
+ ev_pre_close(session, arg, msg);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): unknown event %d",
+ session->id, ev);
+ break;
+ }
+}
+
+apr_status_t h2_proxy_session_process(h2_proxy_session *session)
+{
+ apr_status_t status;
+ int have_written = 0, have_read = 0;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): process", session->id);
+
+run_loop:
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ status = session_start(session);
+ if (status == APR_SUCCESS) {
+ dispatch_event(session, H2_PROXYS_EV_INIT, 0, NULL);
+ goto run_loop;
+ }
+ else {
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ }
+ break;
+
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ while (nghttp2_session_want_write(session->ngh2)) {
+ int rv = nghttp2_session_send(session->ngh2);
+ if (rv < 0 && nghttp2_is_fatal(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): write, rv=%d", session->id, rv);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL);
+ break;
+ }
+ have_written = 1;
+ }
+
+ if (nghttp2_session_want_read(session->ngh2)) {
+ status = h2_proxy_session_read(session, 0, 0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ }
+ }
+
+ if (!have_written && !have_read
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_PROXYS_EV_NO_IO, 0, NULL);
+ goto run_loop;
+ }
+ break;
+
+ case H2_PROXYS_ST_WAIT:
+ if (check_suspended(session) == APR_EAGAIN) {
+ /* no stream has become resumed. Do a blocking read with
+ * ever increasing timeouts... */
+ if (session->wait_timeout < 25) {
+ session->wait_timeout = 25;
+ }
+ else {
+ session->wait_timeout = H2MIN(apr_time_from_msec(100),
+ 2*session->wait_timeout);
+ }
+
+ status = h2_proxy_session_read(session, 1, session->wait_timeout);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ APLOGNO(03365)
+ "h2_proxy_session(%s): WAIT read, timeout=%fms",
+ session->id, (float)session->wait_timeout/1000.0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)
+ || APR_STATUS_IS_EAGAIN(status)) {
+ /* go back to checking all inputs again */
+ transit(session, "wait cycle", H2_PROXYS_ST_BUSY);
+ }
+ }
+ break;
+
+ case H2_PROXYS_ST_IDLE:
+ break;
+
+ case H2_PROXYS_ST_DONE: /* done, session terminated */
+ return APR_EOF;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, session->c,
+ APLOGNO(03346)"h2_proxy_session(%s): unknown state %d",
+ session->id, session->state);
+ dispatch_event(session, H2_PROXYS_EV_PROTO_ERROR, 0, NULL);
+ break;
+ }
+
+
+ if (have_read || have_written) {
+ session->wait_timeout = 0;
+ }
+
+ if (!nghttp2_session_want_read(session->ngh2)
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_PROXYS_EV_NGH2_DONE, 0, NULL);
+ }
+
+ return APR_SUCCESS; /* needs to be called again */
+}
+
+typedef struct {
+ h2_proxy_session *session;
+ h2_proxy_request_done *done;
+} cleanup_iter_ctx;
+
+static int done_iter(void *udata, void *val)
+{
+ cleanup_iter_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+ int touched = (!ctx->session->last_stream_id ||
+ stream->id <= ctx->session->last_stream_id);
+ ctx->done(ctx->session, stream->r, 0, touched);
+ return 1;
+}
+
+void h2_proxy_session_cleanup(h2_proxy_session *session,
+ h2_proxy_request_done *done)
+{
+ if (session->streams && !h2_ihash_empty(session->streams)) {
+ cleanup_iter_ctx ctx;
+ ctx.session = session;
+ ctx.done = done;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03366)
+ "h2_proxy_session(%s): terminated, %d streams unfinished",
+ session->id, (int)h2_ihash_count(session->streams));
+ h2_ihash_iter(session->streams, done_iter, &ctx);
+ h2_ihash_clear(session->streams);
+ }
+}
+
+typedef struct {
+ h2_proxy_session *session;
+ conn_rec *c;
+ apr_off_t bytes;
+ int updated;
+} win_update_ctx;
+
+static int win_update_iter(void *udata, void *val)
+{
+ win_update_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+
+ if (stream->r && stream->r->connection == ctx->c) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c,
+ "h2_proxy_session(%s-%d): win_update %ld bytes",
+ ctx->session->id, (int)stream->id, (long)ctx->bytes);
+ nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes);
+ ctx->updated = 1;
+ return 0;
+ }
+ return 1;
+}
+
+
+void h2_proxy_session_update_window(h2_proxy_session *session,
+ conn_rec *c, apr_off_t bytes)
+{
+ if (session->streams && !h2_ihash_empty(session->streams)) {
+ win_update_ctx ctx;
+ ctx.session = session;
+ ctx.c = c;
+ ctx.bytes = bytes;
+ ctx.updated = 0;
+ h2_ihash_iter(session->streams, win_update_iter, &ctx);
+
+ if (!ctx.updated) {
+ /* could not find the stream any more, possibly closed, update
+ * the connection window at least */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): win_update conn %ld bytes",
+ session->id, (long)bytes);
+ nghttp2_session_consume_connection(session->ngh2, (size_t)bytes);
+ }
+ }
+}
+
diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h
new file mode 100644
index 00000000..7f0a1940
--- /dev/null
+++ b/modules/http2/h2_proxy_session.h
@@ -0,0 +1,111 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_proxy_session_h
+#define h2_proxy_session_h
+
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+
+#include <nghttp2/nghttp2.h>
+
+struct h2_iqueue;
+struct h2_ihash_t;
+
+typedef enum {
+ H2_PROXYS_ST_INIT, /* send initial SETTINGS, etc. */
+ H2_PROXYS_ST_DONE, /* finished, connection close */
+ H2_PROXYS_ST_IDLE, /* no streams to process */
+ H2_PROXYS_ST_BUSY, /* read/write without stop */
+ H2_PROXYS_ST_WAIT, /* waiting for tasks to report back */
+ H2_PROXYS_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */
+ H2_PROXYS_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */
+} h2_proxys_state;
+
+typedef enum {
+ H2_PROXYS_EV_INIT, /* session was initialized */
+ H2_PROXYS_EV_LOCAL_GOAWAY, /* we sent a GOAWAY */
+ H2_PROXYS_EV_REMOTE_GOAWAY, /* the remote sent us a GOAWAY */
+ H2_PROXYS_EV_CONN_ERROR, /* connection error */
+ H2_PROXYS_EV_PROTO_ERROR, /* protocol error */
+ H2_PROXYS_EV_CONN_TIMEOUT, /* connection timeout */
+ H2_PROXYS_EV_NO_IO, /* nothing has been read or written */
+ H2_PROXYS_EV_STREAM_SUBMITTED, /* stream has been submitted */
+ H2_PROXYS_EV_STREAM_DONE, /* stream has been finished */
+ H2_PROXYS_EV_STREAM_RESUMED, /* stream signalled availability of headers/data */
+ H2_PROXYS_EV_DATA_READ, /* connection data has been read */
+ H2_PROXYS_EV_NGH2_DONE, /* nghttp2 wants to neither read nor write anything */
+ H2_PROXYS_EV_PRE_CLOSE, /* connection will close after this */
+} h2_proxys_event_t;
+
+
+typedef struct h2_proxy_session h2_proxy_session;
+typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
+ int complete, int touched);
+
+struct h2_proxy_session {
+ const char *id;
+ conn_rec *c;
+ proxy_conn_rec *p_conn;
+ proxy_server_conf *conf;
+ apr_pool_t *pool;
+ nghttp2_session *ngh2; /* the nghttp2 session itself */
+
+ unsigned int aborted : 1;
+
+ h2_proxy_request_done *done;
+ void *user_data;
+
+ unsigned char window_bits_stream;
+ unsigned char window_bits_connection;
+
+ h2_proxys_state state;
+ apr_interval_time_t wait_timeout;
+
+ struct h2_ihash_t *streams;
+ struct h2_iqueue *suspended;
+ apr_size_t remote_max_concurrent;
+ int last_stream_id; /* last stream id processed by backend, or 0 */
+
+ apr_bucket_brigade *input;
+ apr_bucket_brigade *output;
+};
+
+h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+ proxy_server_conf *conf,
+ unsigned char window_bits_connection,
+ unsigned char window_bits_stream,
+ h2_proxy_request_done *done);
+
+apr_status_t h2_proxy_session_submit(h2_proxy_session *s, const char *url,
+ request_rec *r, int standalone);
+
+/**
+ * Perform a step in processing the proxy session. Will return after
+ * one read/write cycle and indicate session status by status code.
+ * @param s the session to process
+ * @return APR_SUCCESS when the session is still live and processing
+ * needs to be invoked again,
+ * APR_EOF when the session has been terminated
+ */
+apr_status_t h2_proxy_session_process(h2_proxy_session *s);
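+
+/*
+ * Minimal usage sketch (illustrative only, not part of the API contract;
+ * p_conn, conf, r, url and request_done are assumed to come from the
+ * caller, e.g. mod_proxy_http2, and the window bit values are arbitrary):
+ *
+ * h2_proxy_session *s;
+ * s = h2_proxy_session_setup("backend-1", p_conn, conf, 30, 16, request_done);
+ * h2_proxy_session_submit(s, url, r, 1);
+ * while (h2_proxy_session_process(s) == APR_SUCCESS) {
+ * ; (keep driving read/write cycles until the session reports APR_EOF)
+ * }
+ */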
+
+void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
+
+void h2_proxy_session_update_window(h2_proxy_session *s,
+ conn_rec *c, apr_off_t bytes);
+
+#define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url"
+
+#endif /* h2_proxy_session_h */
diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c
new file mode 100644
index 00000000..839f4a5a
--- /dev/null
+++ b/modules/http2/h2_proxy_util.c
@@ -0,0 +1,705 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_request.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2.h"
+#include "h2_proxy_util.h"
+
+/* h2_log2(n) returns floor(log2(n)); exact iff n is a power of 2, 0 for n == 0 */
+unsigned char h2_log2(apr_uint32_t n)
+{
+ int lz = 0;
+ if (!n) {
+ return 0;
+ }
+ if (!(n & 0xffff0000u)) {
+ lz += 16;
+ n = (n << 16);
+ }
+ if (!(n & 0xff000000u)) {
+ lz += 8;
+ n = (n << 8);
+ }
+ if (!(n & 0xf0000000u)) {
+ lz += 4;
+ n = (n << 4);
+ }
+ if (!(n & 0xc0000000u)) {
+ lz += 2;
+ n = (n << 2);
+ }
+ if (!(n & 0x80000000u)) {
+ lz += 1;
+ }
+
+ return 31 - lz;
+}
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+struct h2_ihash_t {
+ apr_hash_t *hash;
+ size_t ioff;
+};
+
+static unsigned int ihash(const char *key, apr_ssize_t *klen)
+{
+ return (unsigned int)(*((int*)key));
+}
+
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int)
+{
+ h2_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_ihash_t));
+ ih->hash = apr_hash_make_custom(pool, ihash);
+ ih->ioff = offset_of_int;
+ return ih;
+}
+
+size_t h2_ihash_count(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash);
+}
+
+int h2_ihash_empty(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash) == 0;
+}
+
+void *h2_ihash_get(h2_ihash_t *ih, int id)
+{
+ return apr_hash_get(ih->hash, &id, sizeof(id));
+}
+
+typedef struct {
+ h2_ihash_iter_t *iter;
+ void *ctx;
+} iter_ctx;
+
+static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen,
+ const void *val)
+{
+ iter_ctx *ictx = ctx;
+ return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/
+}
+
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx)
+{
+ iter_ctx ictx;
+ ictx.iter = fn;
+ ictx.ctx = ctx;
+ return apr_hash_do(ihash_iter, &ictx, ih->hash);
+}
+
+void h2_ihash_add(h2_ihash_t *ih, void *val)
+{
+ apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val);
+}
+
+void h2_ihash_remove(h2_ihash_t *ih, int id)
+{
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val)
+{
+ int id = *((int*)((char *)val + ih->ioff));
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+
+void h2_ihash_clear(h2_ihash_t *ih)
+{
+ apr_hash_clear(ih->hash);
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ void **buffer;
+ size_t max;
+ size_t len;
+} collect_ctx;
+
+static int collect_iter(void *x, void *val)
+{
+ collect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = val;
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max)
+{
+ collect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, collect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove_val(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ int *buffer;
+ size_t max;
+ size_t len;
+} icollect_ctx;
+
+static int icollect_iter(void *x, void *val)
+{
+ icollect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = *((int*)((char *)val + ctx->ih->ioff));
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max)
+{
+ icollect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, icollect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+/*******************************************************************************
+ * iqueue - sorted list of int
+ ******************************************************************************/
+
+static void iq_grow(h2_iqueue *q, int nlen);
+static void iq_swap(h2_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx);
+
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity)
+{
+ h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue));
+ if (q) {
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
+ }
+ return q;
+}
+
+int h2_iq_empty(h2_iqueue *q)
+{
+ return q->nelts == 0;
+}
+
+int h2_iq_count(h2_iqueue *q)
+{
+ return q->nelts;
+}
+
+
+void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
+{
+ int i;
+
+ if (q->nelts >= q->nalloc) {
+ iq_grow(q, q->nalloc * 2);
+ }
+
+ i = (q->head + q->nelts) % q->nalloc;
+ q->elts[i] = sid;
+ ++q->nelts;
+
+ if (cmp) {
+ /* bubble it to the front of the queue */
+ iq_bubble_up(q, i, q->head, cmp, ctx);
+ }
+}
+
+int h2_iq_remove(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ break;
+ }
+ }
+
+ if (i < q->nelts) {
+ ++i;
+ for (; i < q->nelts; ++i) {
+ q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
+ }
+ --q->nelts;
+ return 1;
+ }
+ return 0;
+}
+
+void h2_iq_clear(h2_iqueue *q)
+{
+ q->nelts = 0;
+}
+
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx)
+{
+ /* Assume that changes in ordering are minimal. This needs,
+ * best case, q->nelts - 1 comparisons to check that nothing
+ * changed.
+ */
+ if (q->nelts > 0) {
+ int i, ni, prev, last;
+
+ /* Start at the end of the queue and create a tail of sorted
+ * entries. Make that tail one element longer in each iteration.
+ */
+ last = i = (q->head + q->nelts - 1) % q->nalloc;
+ while (i != q->head) {
+ prev = (q->nalloc + i - 1) % q->nalloc;
+
+ ni = iq_bubble_up(q, i, prev, cmp, ctx);
+ if (ni == prev) {
+ /* i bubbled one up, bubble the new i down, which
+ * keeps all tasks below i sorted. */
+ iq_bubble_down(q, i, last, cmp, ctx);
+ }
+ i = prev;
+ };
+ }
+}
+
+
+int h2_iq_shift(h2_iqueue *q)
+{
+ int sid;
+
+ if (q->nelts <= 0) {
+ return 0;
+ }
+
+ sid = q->elts[q->head];
+ q->head = (q->head + 1) % q->nalloc;
+ q->nelts--;
+
+ return sid;
+}
+
+static void iq_grow(h2_iqueue *q, int nlen)
+{
+ if (nlen > q->nalloc) {
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
+ if (q->nelts > 0) {
+ /* number of elts between head and the end of the allocated array */
+ int l = q->nalloc - q->head;
+ if (l > q->nelts) {
+ l = q->nelts;
+ }
+
+ memmove(nq, q->elts + q->head, sizeof(int) * l);
+ if (l < q->nelts) {
+ /* elts wrapped, append elts in [0, remain] to nq */
+ int remain = q->nelts - l;
+ memmove(nq + l, q->elts, sizeof(int) * remain);
+ }
+ }
+ q->elts = nq;
+ q->nalloc = nlen;
+ q->head = 0;
+ }
+}
+
+static void iq_swap(h2_iqueue *q, int i, int j)
+{
+ int x = q->elts[i];
+ q->elts[i] = q->elts[j];
+ q->elts[j] = x;
+}
+
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int prev;
+ while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
+ && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
+ iq_swap(q, prev, i);
+ i = prev;
+ }
+ return i;
+}
+
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int next;
+ while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
+ && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
+ iq_swap(q, next, i);
+ i = next;
+ }
+ return i;
+}
+
+/*******************************************************************************
+ * h2_ngheader
+ ******************************************************************************/
+#define H2_HD_MATCH_LIT_CS(l, name) \
+ ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+static int h2_util_ignore_header(const char *name)
+{
+ /* never forward, hop-by-hop, see RFC 7540, ch. 8.1.2.2 */
+ return (H2_HD_MATCH_LIT_CS("connection", name)
+ || H2_HD_MATCH_LIT_CS("proxy-connection", name)
+ || H2_HD_MATCH_LIT_CS("upgrade", name)
+ || H2_HD_MATCH_LIT_CS("keep-alive", name)
+ || H2_HD_MATCH_LIT_CS("transfer-encoding", name));
+}
+
+static int count_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ (*((size_t*)ctx))++;
+ }
+ return 1;
+}
+
+#define NV_ADD_LIT_CS(nv, k, v) add_header(nv, k, sizeof(k) - 1, v, strlen(v))
+#define NV_ADD_CS_CS(nv, k, v) add_header(nv, k, strlen(k), v, strlen(v))
+
+static int add_header(h2_ngheader *ngh,
+ const char *key, size_t key_len,
+ const char *value, size_t val_len)
+{
+ nghttp2_nv *nv = &ngh->nv[ngh->nvlen++];
+
+ nv->name = (uint8_t*)key;
+ nv->namelen = key_len;
+ nv->value = (uint8_t*)value;
+ nv->valuelen = val_len;
+ return 1;
+}
+
+static int add_table_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ add_header(ctx, key, strlen(key), value, strlen(value));
+ }
+ return 1;
+}
+
+h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
+ const struct h2_request *req)
+{
+
+ h2_ngheader *ngh;
+ size_t n;
+
+ AP_DEBUG_ASSERT(req);
+ AP_DEBUG_ASSERT(req->scheme);
+ AP_DEBUG_ASSERT(req->authority);
+ AP_DEBUG_ASSERT(req->path);
+ AP_DEBUG_ASSERT(req->method);
+
+ n = 4;
+ apr_table_do(count_header, &n, req->headers, NULL);
+
+ ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ NV_ADD_LIT_CS(ngh, ":scheme", req->scheme);
+ NV_ADD_LIT_CS(ngh, ":authority", req->authority);
+ NV_ADD_LIT_CS(ngh, ":path", req->path);
+ NV_ADD_LIT_CS(ngh, ":method", req->method);
+ apr_table_do(add_table_header, ngh, req->headers, NULL);
+
+ return ngh;
+}
+
+/*******************************************************************************
+ * header HTTP/1 <-> HTTP/2 conversions
+ ******************************************************************************/
+
+typedef struct {
+ const char *name;
+ size_t len;
+} literal;
+
+#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) }
+#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
+
+static literal IgnoredRequestHeaders[] = {
+ H2_DEF_LITERAL("expect"),
+ H2_DEF_LITERAL("upgrade"),
+ H2_DEF_LITERAL("connection"),
+ H2_DEF_LITERAL("keep-alive"),
+ H2_DEF_LITERAL("http2-settings"),
+ H2_DEF_LITERAL("proxy-connection"),
+ H2_DEF_LITERAL("transfer-encoding"),
+};
+static literal IgnoredProxyRespHds[] = {
+ H2_DEF_LITERAL("alt-svc"),
+};
+
+static int ignore_header(const literal *lits, size_t llen,
+ const char *name, size_t nlen)
+{
+ const literal *lit;
+ int i;
+
+ for (i = 0; i < llen; ++i) {
+ lit = &lits[i];
+ if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int h2_req_ignore_header(const char *name, size_t len)
+{
+ return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len);
+}
+
+int h2_proxy_res_ignore_header(const char *name, size_t len)
+{
+ return (h2_req_ignore_header(name, len)
+ || ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len));
+}
+
+void h2_util_camel_case_header(char *s, size_t len)
+{
+ size_t start = 1;
+ size_t i;
+ for (i = 0; i < len; ++i) {
+ if (start) {
+ if (s[i] >= 'a' && s[i] <= 'z') {
+ s[i] -= 'a' - 'A';
+ }
+
+ start = 0;
+ }
+ else if (s[i] == '-') {
+ start = 1;
+ }
+ }
+}
+
+/*******************************************************************************
+ * h2 request handling
+ ******************************************************************************/
+
+/** Match a header name against a string constant, case-insensitive */
+#define H2_HD_MATCH_LIT(l, name, nlen) \
+ ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+static apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ char *hname, *hvalue;
+
+ if (h2_req_ignore_header(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
+ const char *existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ char *nval;
+
+ /* Cookie header fields arrive separately in HTTP/2 and need
+ * to be merged with "; " (instead of the default ", "); e.g.
+ * "a=1" and "b=2" become "Cookie: a=1; b=2"
+ */
+ hvalue = apr_pstrndup(pool, value, vlen);
+ nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
+ apr_table_setn(headers, "Cookie", nval);
+ return APR_SUCCESS;
+ }
+ }
+ else if (H2_HD_MATCH_LIT("host", name, nlen)) {
+ if (apr_table_get(headers, "Host")) {
+ return APR_SUCCESS; /* ignore duplicate */
+ }
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+ hvalue = apr_pstrndup(pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
+
+static h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header,
+ int serialize)
+{
+ h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
+
+ req->id = id;
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+ req->serialize = serialize;
+
+ return req;
+}
+
+h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize)
+{
+ return h2_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
+}
+
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
+{
+ h1_ctx *x = ctx;
+ size_t klen = strlen(key);
+ if (!h2_req_ignore_header(key, klen)) {
+ h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value));
+ }
+ return 1;
+}
+
+apr_status_t h2_req_make(h2_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers)
+{
+ h1_ctx x;
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+
+ AP_DEBUG_ASSERT(req->scheme);
+ AP_DEBUG_ASSERT(req->authority);
+ AP_DEBUG_ASSERT(req->path);
+ AP_DEBUG_ASSERT(req->method);
+
+ x.pool = pool;
+ x.headers = req->headers;
+ apr_table_do(set_h1_header, &x, headers, NULL);
+ return APR_SUCCESS;
+}
+
+/*******************************************************************************
+ * frame logging
+ ******************************************************************************/
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
+{
+ char scratch[128];
+ size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
+
+ switch (frame->hd.type) {
+ case NGHTTP2_DATA: {
+ return apr_snprintf(buffer, maxlen,
+ "DATA[length=%d, flags=%d, stream=%d, padlen=%d]",
+ (int)frame->hd.length, frame->hd.flags,
+ frame->hd.stream_id, (int)frame->data.padlen);
+ }
+ case NGHTTP2_HEADERS: {
+ return apr_snprintf(buffer, maxlen,
+ "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM));
+ }
+ case NGHTTP2_PRIORITY: {
+ return apr_snprintf(buffer, maxlen,
+ "PRIORITY[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_RST_STREAM: {
+ return apr_snprintf(buffer, maxlen,
+ "RST_STREAM[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_SETTINGS: {
+ if (frame->hd.flags & NGHTTP2_FLAG_ACK) {
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[ack=1, stream=%d]",
+ frame->hd.stream_id);
+ }
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[length=%d, stream=%d]",
+ (int)frame->hd.length, frame->hd.stream_id);
+ }
+ case NGHTTP2_PUSH_PROMISE: {
+ return apr_snprintf(buffer, maxlen,
+ "PUSH_PROMISE[length=%d, hend=%d, stream=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_PING: {
+ return apr_snprintf(buffer, maxlen,
+ "PING[length=%d, ack=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags&NGHTTP2_FLAG_ACK,
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_GOAWAY: {
+ size_t len = (frame->goaway.opaque_data_len < s_len)?
+ frame->goaway.opaque_data_len : s_len-1;
+ memcpy(scratch, frame->goaway.opaque_data, len);
+ scratch[len] = '\0';
+ return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
+ "last_stream=%d]", frame->goaway.error_code,
+ scratch, frame->goaway.last_stream_id);
+ }
+ case NGHTTP2_WINDOW_UPDATE: {
+ return apr_snprintf(buffer, maxlen,
+ "WINDOW_UPDATE[stream=%d, incr=%d]",
+ frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ }
+ default:
+ return apr_snprintf(buffer, maxlen,
+ "type=%d[length=%d, flags=%d, stream=%d]",
+ frame->hd.type, (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+}
diff --git a/modules/http2/h2_proxy_util.h b/modules/http2/h2_proxy_util.h
new file mode 100644
index 00000000..98f297fa
--- /dev/null
+++ b/modules/http2/h2_proxy_util.h
@@ -0,0 +1,181 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_proxy_util__
+#define __mod_h2__h2_proxy_util__
+
+/*******************************************************************************
+ * some debugging/format helpers
+ ******************************************************************************/
+struct h2_request;
+struct nghttp2_frame;
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+typedef struct h2_ihash_t h2_ihash_t;
+typedef int h2_ihash_iter_t(void *ctx, void *val);
+
+/**
+ * Create a hash for structures that have an identifying int member.
+ * @param pool the pool to use
+ * @param offset_of_int the offsetof() the int member in the struct
+ */
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
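+
+/*
+ * Usage sketch (illustrative only; the "thing" struct is made up for the
+ * example and offsetof() comes from <stddef.h>):
+ *
+ * typedef struct { int id; const char *name; } thing;
+ *
+ * h2_ihash_t *ih = h2_ihash_create(pool, offsetof(thing, id));
+ * thing *t = apr_pcalloc(pool, sizeof(thing));
+ * t->id = 42;
+ * h2_ihash_add(ih, t);
+ * t = h2_ihash_get(ih, 42);
+ */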
+
+size_t h2_ihash_count(h2_ihash_t *ih);
+int h2_ihash_empty(h2_ihash_t *ih);
+void *h2_ihash_get(h2_ihash_t *ih, int id);
+
+/**
+ * Iterate over the hash members (without defined order) and invoke
+ * fn for each member until 0 is returned.
+ * @param ih the hash to iterate over
+ * @param fn the function to invoke on each member
+ * @param ctx user supplied data passed into each iteration call
+ * @return 0 if one iteration returned 0, otherwise != 0
+ */
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx);
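+
+/*
+ * Iteration sketch (illustrative only; count_members is a hypothetical
+ * callback):
+ *
+ * static int count_members(void *ctx, void *val)
+ * {
+ * (void)val;
+ * (*(int*)ctx)++;
+ * return 1; (returning 0 stops the iteration early)
+ * }
+ *
+ * int n = 0;
+ * h2_ihash_iter(ih, count_members, &n);
+ */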
+
+void h2_ihash_add(h2_ihash_t *ih, void *val);
+void h2_ihash_remove(h2_ihash_t *ih, int id);
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val);
+void h2_ihash_clear(h2_ihash_t *ih);
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max);
+size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max);
+
+/*******************************************************************************
+ * iqueue - sorted list of int with user defined ordering
+ ******************************************************************************/
+typedef struct h2_iqueue {
+ int *elts;
+ int head;
+ int nelts;
+ int nalloc;
+ apr_pool_t *pool;
+} h2_iqueue;
+
+/**
+ * Comparator for two int to determine their order.
+ *
+ * @param i1 first int to compare
+ * @param i2 second int to compare
+ * @param ctx provided user data
+ * @return value is the same as for strcmp() and has the effect:
+ * == 0: s1 and s2 are treated equal in ordering
+ * < 0: s1 should be sorted before s2
+ * > 0: s2 should be sorted before s1
+ */
+typedef int h2_iq_cmp(int i1, int i2, void *ctx);
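+
+/*
+ * Example comparator (illustrative only): sort stream ids in ascending
+ * order, ignoring the user context.
+ *
+ * static int cmp_ascending(int i1, int i2, void *ctx)
+ * {
+ * (void)ctx;
+ * return i1 - i2;
+ * }
+ *
+ * h2_iq_add(q, sid, cmp_ascending, NULL);
+ * h2_iq_sort(q, cmp_ascending, NULL);
+ */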
+
+/**
+ * Allocate a new queue from the pool and initialize.
+ * @param id the identifier of the queue
+ * @param pool the memory pool
+ */
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity);
+
+/**
+ * Return != 0 iff there are no tasks in the queue.
+ * @param q the queue to check
+ */
+int h2_iq_empty(h2_iqueue *q);
+
+/**
+ * Return the number of int in the queue.
+ * @param q the queue to get size on
+ */
+int h2_iq_count(h2_iqueue *q);
+
+/**
+ * Add a stream id to the queue.
+ *
+ * @param q the queue to add the stream id to
+ * @param sid the stream id to add
+ * @param cmp the comparator for sorting
+ * @param ctx user data for comparator
+ */
+void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Remove the stream id from the queue.
+ * @param q the queue
+ * @param sid the stream id to remove
+ * @return != 0 iff the stream id was found in the queue
+ */
+int h2_iq_remove(h2_iqueue *q, int sid);
+
+/**
+ * Remove all entries in the queue.
+ */
+void h2_iq_clear(h2_iqueue *q);
+
+/**
+ * Sort the stream id queue again. Call this if the ordering
+ * has changed.
+ *
+ * @param q the queue to sort
+ * @param cmp the comparator for sorting
+ * @param ctx user data for the comparator
+ */
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Get the first stream id from the queue, or 0 if the queue is empty.
+ * The id is removed from the queue.
+ *
+ * @param q the queue to get the first task from
+ * @return the first stream id of the queue, 0 if empty
+ */
+int h2_iq_shift(h2_iqueue *q);
+
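Illustrative only (not part of this patch): ordering stream ids with the iqueue API above, using an ascending comparator that follows the strcmp()-style contract documented for h2_iq_cmp. The names cmp_asc and iq_demo are hypothetical.

#include <apr_pools.h>

static int cmp_asc(int i1, int i2, void *ctx)
{
    (void)ctx;
    return i1 - i2;                 /* < 0: i1 first, > 0: i2 first */
}

static void iq_demo(apr_pool_t *pool)
{
    h2_iqueue *q = h2_iq_create(pool, 16);

    h2_iq_add(q, 7, cmp_asc, NULL);
    h2_iq_add(q, 3, cmp_asc, NULL);
    h2_iq_add(q, 5, cmp_asc, NULL);

    while (!h2_iq_empty(q)) {
        int sid = h2_iq_shift(q);   /* expected order: 3, 5, 7 */
        (void)sid;                  /* ... process stream id sid ... */
    }
}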
+/*******************************************************************************
+ * common helpers
+ ******************************************************************************/
+/* integer log2 of n; the result equals log2(n) exactly iff n is a power of 2 */
+unsigned char h2_log2(apr_uint32_t n);
+
+/*******************************************************************************
+ * HTTP/2 header helpers
+ ******************************************************************************/
+void h2_util_camel_case_header(char *s, size_t len);
+int h2_proxy_res_ignore_header(const char *name, size_t len);
+
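A small illustration (not part of this patch) of the camel-casing helper; the expected result is an assumption based on its use when translating wire-format header names into HTTP/1.1 style tables.

#include <string.h>

static void camel_demo(void)
{
    char name[] = "content-length";
    h2_util_camel_case_header(name, strlen(name));
    /* name is expected to read "Content-Length" afterwards */
}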
+/*******************************************************************************
+ * nghttp2 helpers
+ ******************************************************************************/
+typedef struct h2_ngheader {
+ nghttp2_nv *nv;
+ apr_size_t nvlen;
+} h2_ngheader;
+h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
+ const struct h2_request *req);
+
+/*******************************************************************************
+ * h2_request helpers
+ ******************************************************************************/
+struct h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize);
+apr_status_t h2_req_make(struct h2_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers);
+
+
+
+#endif /* defined(__mod_h2__h2_proxy_util__) */
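A hedged sketch (not part of this patch) of how the h2_request helpers above fit together; the method, scheme, authority, path and header values are placeholders, and passing serialize=0 is an assumption for the demo.

#include <apr_pools.h>
#include <apr_tables.h>

static struct h2_request *demo_request(apr_pool_t *pool)
{
    struct h2_request *req = h2_req_create(1, pool, 0 /* serialize: assumed off */);
    apr_table_t *headers = apr_table_make(pool, 5);

    apr_table_setn(headers, "Accept", "*/*");
    if (h2_req_make(req, pool, "GET", "https", "example.org", "/",
                    headers) != APR_SUCCESS) {
        return NULL;
    }
    return req;
}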
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
index 748e32ab..977fab58 100644
--- a/modules/http2/h2_push.c
+++ b/modules/http2/h2_push.c
@@ -346,9 +346,9 @@ static int add_push(link_ctx *ctx)
}
headers = apr_table_make(ctx->pool, 5);
apr_table_do(set_push_header, headers, ctx->req->headers, NULL);
- req = h2_request_createn(0, ctx->pool, method, ctx->req->scheme,
- ctx->req->authority, path, headers,
- ctx->req->serialize);
+ req = h2_req_createn(0, ctx->pool, method, ctx->req->scheme,
+ ctx->req->authority, path, headers,
+ ctx->req->serialize);
/* atm, we do not push on pushes */
h2_request_end_headers(req, ctx->pool, 1, 0);
push->req = req;
diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h
index d3519dcb..62f5a0a7 100644
--- a/modules/http2/h2_push.h
+++ b/modules/http2/h2_push.h
@@ -63,7 +63,7 @@ apr_array_header_t *h2_push_collect(apr_pool_t *p,
/**
* Create a new push diary for the given maximum number of entries.
*
- * @oaram p the pool to use
+ * @param p the pool to use
* @param N the max number of entries, rounded up to 2^x
 * @return the created diary, might be NULL if max_entries is 0
*/
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
index 2652661e..d213e167 100644
--- a/modules/http2/h2_request.c
+++ b/modules/http2/h2_request.c
@@ -35,31 +35,6 @@
#include "h2_util.h"
-h2_request *h2_request_create(int id, apr_pool_t *pool, int serialize)
-{
- return h2_request_createn(id, pool, NULL, NULL, NULL, NULL, NULL,
- serialize);
-}
-
-h2_request *h2_request_createn(int id, apr_pool_t *pool,
- const char *method, const char *scheme,
- const char *authority, const char *path,
- apr_table_t *header, int serialize)
-{
- h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
-
- req->id = id;
- req->method = method;
- req->scheme = scheme;
- req->authority = authority;
- req->path = path;
- req->headers = header? header : apr_table_make(pool, 10);
- req->request_time = apr_time_now();
- req->serialize = serialize;
-
- return req;
-}
-
static apr_status_t inspect_clen(h2_request *req, const char *s)
{
char *end;
@@ -67,111 +42,28 @@ static apr_status_t inspect_clen(h2_request *req, const char *s)
return (s == end)? APR_EINVAL : APR_SUCCESS;
}
-static apr_status_t add_h1_header(h2_request *req, apr_pool_t *pool,
- const char *name, size_t nlen,
- const char *value, size_t vlen)
-{
- char *hname, *hvalue;
-
- if (h2_req_ignore_header(name, nlen)) {
- return APR_SUCCESS;
- }
- else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
- const char *existing = apr_table_get(req->headers, "cookie");
- if (existing) {
- char *nval;
-
- /* Cookie header come separately in HTTP/2, but need
- * to be merged by "; " (instead of default ", ")
- */
- hvalue = apr_pstrndup(pool, value, vlen);
- nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
- apr_table_setn(req->headers, "Cookie", nval);
- return APR_SUCCESS;
- }
- }
- else if (H2_HD_MATCH_LIT("host", name, nlen)) {
- if (apr_table_get(req->headers, "Host")) {
- return APR_SUCCESS; /* ignore duplicate */
- }
- }
-
- hname = apr_pstrndup(pool, name, nlen);
- hvalue = apr_pstrndup(pool, value, vlen);
- h2_util_camel_case_header(hname, nlen);
- apr_table_mergen(req->headers, hname, hvalue);
-
- return APR_SUCCESS;
-}
-
-typedef struct {
- h2_request *req;
- apr_pool_t *pool;
-} h1_ctx;
-
-static int set_h1_header(void *ctx, const char *key, const char *value)
-{
- h1_ctx *x = ctx;
- size_t klen = strlen(key);
- if (!h2_req_ignore_header(key, klen)) {
- add_h1_header(x->req, x->pool, key, klen, value, strlen(value));
- }
- return 1;
-}
-
-static apr_status_t add_all_h1_header(h2_request *req, apr_pool_t *pool,
- apr_table_t *header)
-{
- h1_ctx x;
- x.req = req;
- x.pool = pool;
- apr_table_do(set_h1_header, &x, header, NULL);
- return APR_SUCCESS;
-}
-
-
-apr_status_t h2_request_make(h2_request *req, apr_pool_t *pool,
- const char *method, const char *scheme,
- const char *authority, const char *path,
- apr_table_t *headers)
-{
- req->method = method;
- req->scheme = scheme;
- req->authority = authority;
- req->path = path;
-
- AP_DEBUG_ASSERT(req->scheme);
- AP_DEBUG_ASSERT(req->authority);
- AP_DEBUG_ASSERT(req->path);
- AP_DEBUG_ASSERT(req->method);
-
- return add_all_h1_header(req, pool, headers);
-}
-
-apr_status_t h2_request_rwrite(h2_request *req, request_rec *r)
+apr_status_t h2_request_rwrite(h2_request *req, apr_pool_t *pool,
+ request_rec *r)
{
apr_status_t status;
const char *scheme, *authority;
- scheme = (r->parsed_uri.scheme? r->parsed_uri.scheme
+ scheme = apr_pstrdup(pool, r->parsed_uri.scheme? r->parsed_uri.scheme
: ap_http_scheme(r));
- authority = r->hostname;
+ authority = apr_pstrdup(pool, r->hostname);
if (!ap_strchr_c(authority, ':') && r->server && r->server->port) {
apr_port_t defport = apr_uri_port_of_scheme(scheme);
if (defport != r->server->port) {
/* port info missing and port is not default for scheme: append */
- authority = apr_psprintf(r->pool, "%s:%d", authority,
+ authority = apr_psprintf(pool, "%s:%d", authority,
(int)r->server->port);
}
}
- status = h2_request_make(req, r->pool, r->method, scheme, authority,
- apr_uri_unparse(r->pool, &r->parsed_uri,
- APR_URI_UNP_OMITSITEPART),
- r->headers_in);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03058)
- "h2_request(%d): rwrite %s host=%s://%s%s",
- req->id, req->method, req->scheme, req->authority, req->path);
+ status = h2_req_make(req, pool, apr_pstrdup(pool, r->method), scheme,
+ authority, apr_uri_unparse(pool, &r->parsed_uri,
+ APR_URI_UNP_OMITSITEPART),
+ r->headers_in);
return status;
}
@@ -223,7 +115,7 @@ apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
}
else {
/* non-pseudo header, append to work bucket of stream */
- status = add_h1_header(req, pool, name, nlen, value, vlen);
+ status = h2_headers_add_h1(req->headers, pool, name, nlen, value, vlen);
}
return status;
@@ -235,7 +127,8 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
const char *s;
if (req->eoh) {
- return APR_EINVAL;
+ /* already done */
+ return APR_SUCCESS;
}
/* rfc7540, ch. 8.1.2.3:
@@ -337,37 +230,18 @@ apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
return add_h1_trailer(req, pool, name, nlen, value, vlen);
}
-#define OPT_COPY(p, s) ((s)? apr_pstrdup(p, s) : NULL)
-
-void h2_request_copy(apr_pool_t *p, h2_request *dst, const h2_request *src)
+h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
{
- /* keep the dst id */
- dst->initiated_on = src->initiated_on;
- dst->method = OPT_COPY(p, src->method);
- dst->scheme = OPT_COPY(p, src->scheme);
- dst->authority = OPT_COPY(p, src->authority);
- dst->path = OPT_COPY(p, src->path);
- dst->headers = apr_table_clone(p, src->headers);
+ h2_request *dst = apr_pmemdup(p, src, sizeof(*dst));
+ dst->method = apr_pstrdup(p, src->method);
+ dst->scheme = apr_pstrdup(p, src->scheme);
+ dst->authority = apr_pstrdup(p, src->authority);
+ dst->path = apr_pstrdup(p, src->path);
+ dst->headers = apr_table_clone(p, src->headers);
if (src->trailers) {
- dst->trailers = apr_table_clone(p, src->trailers);
+ dst->trailers = apr_table_clone(p, src->trailers);
}
- else {
- dst->trailers = NULL;
- }
- dst->content_length = src->content_length;
- dst->chunked = src->chunked;
- dst->eoh = src->eoh;
- dst->body = src->body;
- dst->serialize = src->serialize;
- dst->push_policy = src->push_policy;
-}
-
-h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
-{
- h2_request *nreq = apr_pcalloc(p, sizeof(*nreq));
- memcpy(nreq, src, sizeof(*nreq));
- h2_request_copy(p, nreq, src);
- return nreq;
+ return dst;
}
request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn)
@@ -436,7 +310,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn)
}
ap_parse_uri(r, req->path);
- r->protocol = "HTTP/2";
+ r->protocol = "HTTP/2.0";
r->proto_num = HTTP_VERSION(2, 0);
r->the_request = apr_psprintf(r->pool, "%s %s %s",
diff --git a/modules/http2/h2_request.h b/modules/http2/h2_request.h
index da87d70a..ba48f4a1 100644
--- a/modules/http2/h2_request.h
+++ b/modules/http2/h2_request.h
@@ -18,19 +18,8 @@
#include "h2.h"
-h2_request *h2_request_create(int id, apr_pool_t *pool, int serialize);
-
-h2_request *h2_request_createn(int id, apr_pool_t *pool,
- const char *method, const char *scheme,
- const char *authority, const char *path,
- apr_table_t *headers, int serialize);
-
-apr_status_t h2_request_make(h2_request *req, apr_pool_t *pool,
- const char *method, const char *scheme,
- const char *authority, const char *path,
- apr_table_t *headers);
-
-apr_status_t h2_request_rwrite(h2_request *req, request_rec *r);
+apr_status_t h2_request_rwrite(h2_request *req, apr_pool_t *pool,
+ request_rec *r);
apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
const char *name, size_t nlen,
@@ -43,8 +32,6 @@ apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
int eos, int push);
-void h2_request_copy(apr_pool_t *p, h2_request *dst, const h2_request *src);
-
h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
/**
diff --git a/modules/http2/h2_response.c b/modules/http2/h2_response.c
index eb9043d0..4cafd355 100644
--- a/modules/http2/h2_response.c
+++ b/modules/http2/h2_response.c
@@ -171,13 +171,14 @@ h2_response *h2_response_die(int stream_id, apr_status_t type,
{
apr_table_t *headers = apr_table_make(pool, 5);
char *date = NULL;
+ int status = (type >= 200 && type < 600)? type : 500;
date = apr_palloc(pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, req->request_time);
apr_table_setn(headers, "Date", date);
apr_table_setn(headers, "Server", ap_get_server_banner());
- return h2_response_create_int(stream_id, 0, 500, headers, NULL, pool);
+ return h2_response_create_int(stream_id, 0, status, headers, NULL, pool);
}
h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from)
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index 928bb4a6..598df177 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -28,6 +28,7 @@
#include <scoreboard.h>
#include "h2_private.h"
+#include "h2.h"
#include "h2_bucket_eoc.h"
#include "h2_bucket_eos.h"
#include "h2_config.h"
@@ -56,7 +57,7 @@ static int h2_session_status_from_apr_status(apr_status_t rv)
return NGHTTP2_ERR_WOULDBLOCK;
}
else if (APR_STATUS_IS_EOF(rv)) {
- return NGHTTP2_ERR_EOF;
+ return NGHTTP2_ERR_EOF;
}
return NGHTTP2_ERR_PROTO;
}
@@ -78,6 +79,18 @@ static int is_accepting_streams(h2_session *session);
static void dispatch_event(h2_session *session, h2_session_event_t ev,
int err, const char *msg);
+apr_status_t h2_session_stream_done(h2_session *session, h2_stream *stream)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): EOS bucket cleanup -> done",
+ session->id, stream->id);
+ h2_ihash_remove(session->streams, stream->id);
+ h2_mplx_stream_done(session->mplx, stream);
+
+ dispatch_event(session, H2_SESSION_EV_STREAM_DONE, 0, NULL);
+ return APR_SUCCESS;
+}
+
typedef struct stream_sel_ctx {
h2_session *session;
h2_stream *candidate;
@@ -112,7 +125,7 @@ static void cleanup_streams(h2_session *session)
while (1) {
h2_ihash_iter(session->streams, find_cleanup_stream, &ctx);
if (ctx.candidate) {
- h2_session_stream_destroy(session, ctx.candidate);
+ h2_session_stream_done(session, ctx.candidate);
ctx.candidate = NULL;
}
else {
@@ -121,23 +134,20 @@ static void cleanup_streams(h2_session *session)
}
}
-h2_stream *h2_session_open_stream(h2_session *session, int stream_id)
+h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
+ int initiated_on, const h2_request *req)
{
h2_stream * stream;
apr_pool_t *stream_pool;
- if (session->spare) {
- stream_pool = session->spare;
- session->spare = NULL;
- }
- else {
- apr_pool_create(&stream_pool, session->pool);
- apr_pool_tag(stream_pool, "h2_stream");
- }
-
- stream = h2_stream_open(stream_id, stream_pool, session);
+ apr_pool_create(&stream_pool, session->pool);
+ apr_pool_tag(stream_pool, "h2_stream");
+ stream = h2_stream_open(stream_id, stream_pool, session,
+ initiated_on, req);
+ nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream);
h2_ihash_add(session->streams, stream);
+
if (H2_STREAM_CLIENT_INITIATED(stream_id)) {
if (stream_id > session->remote.emitted_max) {
++session->remote.emitted_count;
@@ -151,6 +161,7 @@ h2_stream *h2_session_open_stream(h2_session *session, int stream_id)
session->remote.emitted_max = stream->id;
}
}
+ dispatch_event(session, H2_SESSION_EV_STREAM_OPEN, 0, NULL);
return stream;
}
@@ -252,13 +263,18 @@ static int on_invalid_frame_recv_cb(nghttp2_session *ngh2,
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03063)
- "h2_session(%ld): recv unknown FRAME[%s], frames=%ld/%ld (r/s)",
+ "h2_session(%ld): recv invalid FRAME[%s], frames=%ld/%ld (r/s)",
session->id, buffer, (long)session->frames_received,
(long)session->frames_sent);
}
return 0;
}
+static h2_stream *get_stream(h2_session *session, int stream_id)
+{
+ return nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+}
+
static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
int32_t stream_id,
const uint8_t *data, size_t len, void *userp)
@@ -274,7 +290,7 @@ static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
return 0;
}
- stream = h2_session_get_stream(session, stream_id);
+ stream = get_stream(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064)
"h2_stream(%ld-%d): on_data_chunk for unknown stream",
@@ -309,8 +325,12 @@ static apr_status_t stream_release(h2_session *session,
h2_stream *stream,
uint32_t error_code)
{
+ conn_rec *c = session->c;
+ apr_bucket *b;
+ apr_status_t status;
+
if (!error_code) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_stream(%ld-%d): handled, closing",
session->id, (int)stream->id);
if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
@@ -320,16 +340,18 @@ static apr_status_t stream_release(h2_session *session,
}
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03065)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03065)
"h2_stream(%ld-%d): closing with err=%d %s",
session->id, (int)stream->id, (int)error_code,
h2_h2_err_description(error_code));
h2_stream_rst(stream, error_code);
}
- return h2_conn_io_writeb(&session->io,
- h2_bucket_eos_create(session->c->bucket_alloc,
- stream));
+ b = h2_bucket_eos_create(c->bucket_alloc, stream);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ status = h2_conn_io_pass(&session->io, session->bbtmp);
+ apr_brigade_cleanup(session->bbtmp);
+ return status;
}
static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id,
@@ -339,7 +361,7 @@ static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id,
h2_stream *stream;
(void)ngh2;
- stream = h2_session_get_stream(session, stream_id);
+ stream = get_stream(session, stream_id);
if (stream) {
stream_release(session, stream, error_code);
}
@@ -355,12 +377,12 @@ static int on_begin_headers_cb(nghttp2_session *ngh2,
/* We may see HEADERs at the start of a stream or after all DATA
* streams to carry trailers. */
(void)ngh2;
- s = h2_session_get_stream(session, frame->hd.stream_id);
+ s = get_stream(session, frame->hd.stream_id);
if (s) {
/* nop */
}
else {
- s = h2_session_open_stream((h2_session *)userp, frame->hd.stream_id);
+ s = h2_session_open_stream(userp, frame->hd.stream_id, 0, NULL);
}
return s? 0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
}
@@ -375,26 +397,24 @@ static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
h2_stream * stream;
apr_status_t status;
- (void)ngh2;
(void)flags;
if (!is_accepting_streams(session)) {
/* just ignore */
return 0;
}
- stream = h2_session_get_stream(session, frame->hd.stream_id);
+ stream = get_stream(session, frame->hd.stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
APLOGNO(02920)
- "h2_session: stream(%ld-%d): on_header for unknown stream",
+ "h2_session: stream(%ld-%d): on_header unknown stream",
session->id, (int)frame->hd.stream_id);
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
status = h2_stream_add_header(stream, (const char *)name, namelen,
(const char *)value, valuelen);
-
- if (status != APR_SUCCESS) {
+ if (status != APR_SUCCESS && !stream->response) {
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
return 0;
@@ -429,7 +449,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
/* This can be HEADERS for a new stream, defining the request,
* or HEADER may come after DATA at the end of a stream as in
* trailers */
- stream = h2_session_get_stream(session, frame->hd.stream_id);
+ stream = get_stream(session, frame->hd.stream_id);
if (stream) {
int eos = (frame->hd.flags & NGHTTP2_FLAG_END_STREAM);
@@ -453,7 +473,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
}
break;
case NGHTTP2_DATA:
- stream = h2_session_get_stream(session, frame->hd.stream_id);
+ stream = get_stream(session, frame->hd.stream_id);
if (stream) {
int eos = (frame->hd.flags & NGHTTP2_FLAG_END_STREAM);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
@@ -490,7 +510,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
"h2_session(%ld-%d): RST_STREAM by client, errror=%d",
session->id, (int)frame->hd.stream_id,
(int)frame->rst_stream.error_code);
- stream = h2_session_get_stream(session, frame->hd.stream_id);
+ stream = get_stream(session, frame->hd.stream_id);
if (stream && stream->request && stream->request->initiated_on) {
++session->pushes_reset;
}
@@ -533,13 +553,6 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
return 0;
}
-static apr_status_t pass_data(void *ctx,
- const char *data, apr_off_t length)
-{
- return h2_conn_io_write(&((h2_session*)ctx)->io, data, length);
-}
-
-
static char immortal_zeros[H2_MAX_PADLEN];
static int on_send_data_cb(nghttp2_session *ngh2,
@@ -556,6 +569,7 @@ static int on_send_data_cb(nghttp2_session *ngh2,
int eos;
h2_stream *stream;
apr_bucket *b;
+ apr_off_t len = length;
(void)ngh2;
(void)source;
@@ -564,65 +578,57 @@ static int on_send_data_cb(nghttp2_session *ngh2,
}
padlen = (unsigned char)frame->data.padlen;
- stream = h2_session_get_stream(session, stream_id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): send_data_cb for %ld bytes",
+ session->id, (int)stream_id, (long)length);
+
+ stream = get_stream(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c,
APLOGNO(02924)
- "h2_stream(%ld-%d): send_data",
+ "h2_stream(%ld-%d): send_data, lookup stream",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_stream(%ld-%d): send_data_cb for %ld bytes",
- session->id, (int)stream_id, (long)length);
-
- if (h2_conn_io_is_buffered(&session->io)) {
- status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
- if (status == APR_SUCCESS) {
- if (padlen) {
- status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
- }
-
- if (status == APR_SUCCESS) {
- apr_off_t len = length;
- status = h2_stream_readx(stream, pass_data, session, &len, &eos);
- if (status == APR_SUCCESS && len != length) {
- status = APR_EINVAL;
- }
- }
-
- if (status == APR_SUCCESS && padlen) {
- if (padlen) {
- status = h2_conn_io_write(&session->io, immortal_zeros, padlen);
- }
- }
- }
+ status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
+ if (padlen && status == APR_SUCCESS) {
+ status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
}
- else {
- status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
- if (padlen && status == APR_SUCCESS) {
- status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
- }
- if (status == APR_SUCCESS) {
- apr_off_t len = length;
- status = h2_stream_read_to(stream, session->io.output, &len, &eos);
- if (status == APR_SUCCESS && len != length) {
- status = APR_EINVAL;
- }
- }
-
- if (status == APR_SUCCESS && padlen) {
- b = apr_bucket_immortal_create(immortal_zeros, padlen,
- session->c->bucket_alloc);
- status = h2_conn_io_writeb(&session->io, b);
- }
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_stream(%ld-%d): writing frame header",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ status = h2_stream_read_to(stream, session->bbtmp, &len, &eos);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_stream(%ld-%d): send_data_cb, reading stream",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ else if (len != length) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_stream(%ld-%d): send_data_cb, wanted %ld bytes, "
+ "got %ld from stream",
+ session->id, (int)stream_id, (long)length, (long)len);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
}
+ if (padlen) {
+ b = apr_bucket_immortal_create(immortal_zeros, padlen,
+ session->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ }
+ status = h2_conn_io_pass(&session->io, session->bbtmp);
+
+ apr_brigade_cleanup(session->bbtmp);
if (status == APR_SUCCESS) {
stream->data_frames_sent++;
- h2_conn_io_consider_pass(&session->io);
return 0;
}
else {
@@ -630,9 +636,8 @@ static int on_send_data_cb(nghttp2_session *ngh2,
APLOGNO(02925)
"h2_stream(%ld-%d): failed send_data_cb",
session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
}
-
- return h2_session_status_from_apr_status(status);
}
static int on_frame_send_cb(nghttp2_session *ngh2,
@@ -679,43 +684,30 @@ static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
return APR_SUCCESS;
}
-static void h2_session_cleanup(h2_session *session)
+static void h2_session_destroy(h2_session *session)
{
- AP_DEBUG_ASSERT(session);
- /* This is an early cleanup of the session that may
- * discard what is no longer necessary for *new* streams
- * and general HTTP/2 processing.
- * At this point, all frames are in transit or somehwere in
- * our buffers or passed down output filters.
- * h2 streams might still being written out.
- */
- if (session->c) {
- h2_ctx_clear(session->c);
+ AP_DEBUG_ASSERT(session);
+
+ h2_ihash_clear(session->streams);
+ if (session->mplx) {
+ h2_mplx_set_consumed_cb(session->mplx, NULL, NULL);
+ h2_mplx_release_and_join(session->mplx, session->iowait);
+ session->mplx = NULL;
}
+
+ ap_remove_input_filter_byhandle((session->r? session->r->input_filters :
+ session->c->input_filters), "H2_IN");
if (session->ngh2) {
nghttp2_session_del(session->ngh2);
session->ngh2 = NULL;
}
- if (session->spare) {
- apr_pool_destroy(session->spare);
- session->spare = NULL;
+ if (session->c) {
+ h2_ctx_clear(session->c);
}
-}
-
-static void h2_session_destroy(h2_session *session)
-{
- AP_DEBUG_ASSERT(session);
- h2_session_cleanup(session);
if (APLOGctrace1(session->c)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "h2_session(%ld): destroy, %d streams open",
- session->id, (int)h2_ihash_count(session->streams));
- }
- if (session->mplx) {
- h2_mplx_set_consumed_cb(session->mplx, NULL, NULL);
- h2_mplx_release_and_join(session->mplx, session->iowait);
- session->mplx = NULL;
+ "h2_session(%ld): destroy", session->id);
}
if (session->pool) {
apr_pool_destroy(session->pool);
@@ -897,7 +889,7 @@ static h2_session *h2_session_create_int(conn_rec *c,
h2_session_receive, session);
ap_add_input_filter("H2_IN", session->cin, r, c);
- h2_conn_io_init(&session->io, c, session->config, session->pool);
+ h2_conn_io_init(&session->io, c, session->config);
session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
status = init_callbacks(c, &callbacks);
@@ -1030,7 +1022,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
}
/* Now we need to auto-open stream 1 for the request we got. */
- stream = h2_session_open_stream(session, 1);
+ stream = h2_session_open_stream(session, 1, 0, NULL);
if (!stream) {
status = APR_EGENERAL;
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
@@ -1095,65 +1087,6 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
return status;
}
-typedef struct {
- h2_session *session;
- int resume_count;
-} resume_ctx;
-
-static int resume_on_data(void *ctx, void *val)
-{
- h2_stream *stream = val;
- resume_ctx *rctx = (resume_ctx*)ctx;
- h2_session *session = rctx->session;
- AP_DEBUG_ASSERT(session);
- AP_DEBUG_ASSERT(stream);
-
- if (h2_stream_is_suspended(stream)) {
- apr_status_t status;
- apr_off_t len = -1;
- int eos;
-
- status = h2_stream_out_prepare(stream, &len, &eos);
- if (status == APR_SUCCESS) {
- int rv;
- h2_stream_set_suspended(stream, 0);
- ++rctx->resume_count;
-
- rv = nghttp2_session_resume_data(session->ngh2, stream->id);
- ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
- APLOG_ERR : APLOG_DEBUG, 0, session->c,
- APLOGNO(02936)
- "h2_stream(%ld-%d): resuming %s, len=%ld, eos=%d",
- session->id, stream->id,
- rv? nghttp2_strerror(rv) : "", (long)len, eos);
- }
- }
- return 1;
-}
-
-static int h2_session_resume_streams_with_data(h2_session *session)
-{
- AP_DEBUG_ASSERT(session);
- if (!h2_ihash_is_empty(session->streams)
- && session->mplx && !session->mplx->aborted) {
- resume_ctx ctx;
-
- ctx.session = session;
- ctx.resume_count = 0;
-
- /* Resume all streams where we have data in the out queue and
- * which had been suspended before. */
- h2_ihash_iter(session->streams, resume_on_data, &ctx);
- return ctx.resume_count;
- }
- return 0;
-}
-
-h2_stream *h2_session_get_stream(h2_session *session, int stream_id)
-{
- return h2_ihash_get(session->streams, stream_id);
-}
-
static ssize_t stream_data_cb(nghttp2_session *ng2s,
int32_t stream_id,
uint8_t *buf,
@@ -1179,7 +1112,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
(void)ng2s;
(void)buf;
(void)source;
- stream = h2_session_get_stream(session, stream_id);
+ stream = get_stream(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
APLOGNO(02937)
@@ -1209,7 +1142,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
* it. Remember at our h2_stream that we need to do this.
*/
nread = 0;
- h2_stream_set_suspended(stream, 1);
+ h2_mplx_suspend_stream(session->mplx, stream->id);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03071)
"h2_stream(%ld-%d): suspending",
session->id, (int)stream_id);
@@ -1252,102 +1185,6 @@ typedef struct {
size_t offset;
} nvctx_t;
-/**
- * Start submitting the response to a stream request. This is possible
- * once we have all the response headers. The response body will be
- * read by the session using the callback we supply.
- */
-static apr_status_t submit_response(h2_session *session, h2_stream *stream)
-{
- apr_status_t status = APR_SUCCESS;
- h2_response *response = h2_stream_get_response(stream);
- int rv = 0;
- AP_DEBUG_ASSERT(session);
- AP_DEBUG_ASSERT(stream);
- AP_DEBUG_ASSERT(response || stream->rst_error);
-
- if (stream->submitted) {
- rv = NGHTTP2_PROTOCOL_ERROR;
- }
- else if (response && response->headers) {
- nghttp2_data_provider provider, *pprovider = NULL;
- h2_ngheader *ngh;
- const h2_priority *prio;
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03073)
- "h2_stream(%ld-%d): submit response %d",
- session->id, stream->id, response->http_status);
-
- if (response->content_length != 0) {
- memset(&provider, 0, sizeof(provider));
- provider.source.fd = stream->id;
- provider.read_callback = stream_data_cb;
- pprovider = &provider;
- }
-
- /* If this stream is not a pushed one itself,
- * and HTTP/2 server push is enabled here,
- * and the response is in the range 200-299 *),
- * and the remote side has pushing enabled,
- * -> find and perform any pushes on this stream
- * *before* we submit the stream response itself.
- * This helps clients avoid opening new streams on Link
- * headers that get pushed right afterwards.
- *
- * *) the response code is relevant, as we do not want to
- * make pushes on 401 or 403 codes, neiterh on 301/302
- * and friends. And if we see a 304, we do not push either
- * as the client, having this resource in its cache, might
- * also have the pushed ones as well.
- */
- if (stream->request && !stream->request->initiated_on
- && H2_HTTP_2XX(response->http_status)
- && h2_session_push_enabled(session)) {
-
- h2_stream_submit_pushes(stream);
- }
-
- prio = h2_stream_get_priority(stream);
- if (prio) {
- h2_session_set_prio(session, stream, prio);
- /* no showstopper if that fails for some reason */
- }
-
- ngh = h2_util_ngheader_make_res(stream->pool, response->http_status,
- response->headers);
- rv = nghttp2_submit_response(session->ngh2, response->stream_id,
- ngh->nv, ngh->nvlen, pprovider);
- }
- else {
- int err = H2_STREAM_RST(stream, H2_ERR_PROTOCOL_ERROR);
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03074)
- "h2_stream(%ld-%d): RST_STREAM, err=%d",
- session->id, stream->id, err);
-
- rv = nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
- stream->id, err);
- }
-
- stream->submitted = 1;
- if (stream->request && stream->request->initiated_on) {
- ++session->pushes_submitted;
- }
- else {
- ++session->responses_submitted;
- }
-
- if (nghttp2_is_fatal(rv)) {
- status = APR_EGENERAL;
- dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
- APLOGNO(02940) "submit_response: %s",
- nghttp2_strerror(rv));
- }
-
- return status;
-}
-
struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
h2_push *push)
{
@@ -1372,15 +1209,13 @@ struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
session->id, is->id, nid,
push->req->method, push->req->path, is->id);
- stream = h2_session_open_stream(session, nid);
+ stream = h2_session_open_stream(session, nid, is->id, push->req);
if (stream) {
- h2_stream_set_h2_request(stream, is->id, push->req);
status = stream_schedule(session, stream, 1);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
"h2_stream(%ld-%d): scheduling push stream",
session->id, stream->id);
- h2_stream_cleanup(stream);
stream = NULL;
}
++session->unsent_promises;
@@ -1503,34 +1338,6 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
return status;
}
-apr_status_t h2_session_stream_destroy(h2_session *session, h2_stream *stream)
-{
- apr_pool_t *pool = h2_stream_detach_pool(stream);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_stream(%ld-%d): cleanup by EOS bucket destroy",
- session->id, stream->id);
- /* this may be called while the session has already freed
- * some internal structures or even when the mplx is locked. */
- if (session->mplx) {
- h2_mplx_stream_done(session->mplx, stream->id, stream->rst_error);
- }
-
- if (session->streams) {
- h2_ihash_remove(session->streams, stream->id);
- }
- h2_stream_destroy(stream);
-
- if (pool) {
- apr_pool_clear(pool);
- if (session->spare) {
- apr_pool_destroy(session->spare);
- }
- session->spare = pool;
- }
- return APR_SUCCESS;
-}
-
int h2_session_push_enabled(h2_session *session)
{
/* iff we can and they can and want */
@@ -1557,6 +1364,7 @@ static apr_status_t h2_session_send(h2_session *session)
if (socket) {
apr_socket_timeout_set(socket, saved_timeout);
}
+ session->have_written = 1;
if (rv != 0) {
if (nghttp2_is_fatal(rv)) {
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
@@ -1570,6 +1378,148 @@ static apr_status_t h2_session_send(h2_session *session)
return APR_SUCCESS;
}
+/**
+ * A stream was resumed as new output data arrived.
+ */
+static apr_status_t on_stream_resume(void *ctx, int stream_id)
+{
+ h2_session *session = ctx;
+ h2_stream *stream = get_stream(session, stream_id);
+ apr_status_t status = APR_SUCCESS;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): on_resume", session->id, stream_id);
+ if (stream) {
+ int rv = nghttp2_session_resume_data(session->ngh2, stream_id);
+ session->have_written = 1;
+ ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
+ APLOG_ERR : APLOG_DEBUG, 0, session->c,
+ APLOGNO(02936)
+ "h2_stream(%ld-%d): resuming %s",
+ session->id, stream->id, rv? nghttp2_strerror(rv) : "");
+ }
+ return status;
+}
+
+/**
+ * A response for the stream is ready.
+ */
+static apr_status_t on_stream_response(void *ctx, int stream_id)
+{
+ h2_session *session = ctx;
+ h2_stream *stream = get_stream(session, stream_id);
+ apr_status_t status = APR_SUCCESS;
+ h2_response *response;
+ int rv = 0;
+
+ AP_DEBUG_ASSERT(session);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): on_response", session->id, stream_id);
+ if (!stream) {
+ return APR_NOTFOUND;
+ }
+
+ response = h2_stream_get_response(stream);
+ AP_DEBUG_ASSERT(response || stream->rst_error);
+
+ if (stream->submitted) {
+ rv = NGHTTP2_PROTOCOL_ERROR;
+ }
+ else if (response && response->headers) {
+ nghttp2_data_provider provider, *pprovider = NULL;
+ h2_ngheader *ngh;
+ const h2_priority *prio;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03073)
+ "h2_stream(%ld-%d): submit response %d, REMOTE_WINDOW_SIZE=%u",
+ session->id, stream->id, response->http_status,
+ (unsigned int)nghttp2_session_get_stream_remote_window_size(session->ngh2, stream->id));
+
+ if (response->content_length != 0) {
+ memset(&provider, 0, sizeof(provider));
+ provider.source.fd = stream->id;
+ provider.read_callback = stream_data_cb;
+ pprovider = &provider;
+ }
+
+ /* If this stream is not a pushed one itself,
+ * and HTTP/2 server push is enabled here,
+ * and the response is in the range 200-299 *),
+ * and the remote side has pushing enabled,
+ * -> find and perform any pushes on this stream
+ * *before* we submit the stream response itself.
+ * This helps clients avoid opening new streams on Link
+ * headers that get pushed right afterwards.
+ *
+ * *) the response code is relevant, as we do not want to
+ * make pushes on 401 or 403 codes, neither on 301/302
+ * and friends. And if we see a 304, we do not push either,
+ * as the client, having this resource in its cache, might
+ * have the pushed ones as well.
+ */
+ if (stream->request && !stream->request->initiated_on
+ && H2_HTTP_2XX(response->http_status)
+ && h2_session_push_enabled(session)) {
+
+ h2_stream_submit_pushes(stream);
+ }
+
+ prio = h2_stream_get_priority(stream);
+ if (prio) {
+ h2_session_set_prio(session, stream, prio);
+ /* no showstopper if that fails for some reason */
+ }
+
+ ngh = h2_util_ngheader_make_res(stream->pool, response->http_status,
+ response->headers);
+ rv = nghttp2_submit_response(session->ngh2, response->stream_id,
+ ngh->nv, ngh->nvlen, pprovider);
+ }
+ else {
+ int err = H2_STREAM_RST(stream, H2_ERR_PROTOCOL_ERROR);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03074)
+ "h2_stream(%ld-%d): RST_STREAM, err=%d",
+ session->id, stream->id, err);
+
+ rv = nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, err);
+ }
+
+ stream->submitted = 1;
+ session->have_written = 1;
+
+ if (stream->request && stream->request->initiated_on) {
+ ++session->pushes_submitted;
+ }
+ else {
+ ++session->responses_submitted;
+ }
+
+ if (nghttp2_is_fatal(rv)) {
+ status = APR_EGENERAL;
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ APLOGNO(02940) "submit_response: %s",
+ nghttp2_strerror(rv));
+ }
+
+ ++session->unsent_submits;
+
+ /* Unsent push promises are written immediately, as nghttp2
+ * 1.5.0 realizes internal stream data structures only on
+ * send and we might need them for other submits.
+ * Also, to conserve memory, we send at least every 10 submits
+ * so that nghttp2 does not buffer all outbound items too
+ * long.
+ */
+ if (status == APR_SUCCESS
+ && (session->unsent_promises || session->unsent_submits > 10)) {
+ status = h2_session_send(session);
+ }
+ return status;
+}
+
static apr_status_t h2_session_receive(void *ctx, const char *data,
apr_size_t len, apr_size_t *readlen)
{
@@ -1697,36 +1647,6 @@ static int has_suspended_streams(h2_session *session)
return has_suspended;
}
-static apr_status_t h2_session_submit(h2_session *session)
-{
- apr_status_t status = APR_EAGAIN;
- h2_stream *stream;
-
- if (has_unsubmitted_streams(session)) {
- /* If we have responses ready, submit them now. */
- while ((stream = h2_mplx_next_submit(session->mplx, session->streams))) {
- status = submit_response(session, stream);
- ++session->unsent_submits;
-
- /* Unsent push promises are written immediately, as nghttp2
- * 1.5.0 realizes internal stream data structures only on
- * send and we might need them for other submits.
- * Also, to conserve memory, we send at least every 10 submits
- * so that nghttp2 does not buffer all outbound items too
- * long.
- */
- if (status == APR_SUCCESS
- && (session->unsent_promises || session->unsent_submits > 10)) {
- status = h2_session_send(session);
- if (status != APR_SUCCESS) {
- break;
- }
- }
- }
- }
- return status;
-}
-
static const char *StateNames[] = {
"INIT", /* H2_SESSION_ST_INIT */
"DONE", /* H2_SESSION_ST_DONE */
@@ -1757,12 +1677,51 @@ static int is_accepting_streams(h2_session *session)
}
}
+static void update_child_status(h2_session *session, int status, const char *msg)
+{
+ /* Assume that code/msg only change when something really happened,
+ * and avoid updating the scoreboard in between */
+ if (session->last_status_code != status
+ || session->last_status_msg != msg) {
+ apr_snprintf(session->status, sizeof(session->status),
+ "%s, streams: %d/%d/%d/%d/%d (open/recv/resp/push/rst)",
+ msg? msg : "-",
+ (int)session->open_streams,
+ (int)session->remote.emitted_count,
+ (int)session->responses_submitted,
+ (int)session->pushes_submitted,
+ (int)session->pushes_reset + session->streams_reset);
+ ap_update_child_status_descr(session->c->sbh, status, session->status);
+ }
+}
+
static void transit(h2_session *session, const char *action, h2_session_state nstate)
{
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03078)
- "h2_session(%ld): transit [%s] -- %s --> [%s]", session->id,
- state_name(session->state), action, state_name(nstate));
- session->state = nstate;
+ if (session->state != nstate) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03078)
+ "h2_session(%ld): transit [%s] -- %s --> [%s]", session->id,
+ state_name(session->state), action, state_name(nstate));
+ session->state = nstate;
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ update_child_status(session, (session->open_streams == 0?
+ SERVER_BUSY_KEEPALIVE
+ : SERVER_BUSY_READ), "idle");
+ break;
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ update_child_status(session, SERVER_CLOSING, "remote goaway");
+ break;
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ update_child_status(session, SERVER_CLOSING, "local goaway");
+ break;
+ case H2_SESSION_ST_DONE:
+ update_child_status(session, SERVER_CLOSING, "done");
+ break;
+ default:
+ /* nop */
+ break;
+ }
+ }
}
static void h2_session_ev_init(h2_session *session, int arg, const char *msg)
@@ -1771,7 +1730,6 @@ static void h2_session_ev_init(h2_session *session, int arg, const char *msg)
case H2_SESSION_ST_INIT:
transit(session, "init", H2_SESSION_ST_BUSY);
break;
-
default:
/* nop */
break;
@@ -1827,7 +1785,7 @@ static void h2_session_ev_conn_error(h2_session *session, int arg, const char *m
break;
default:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03401)
"h2_session(%ld): conn error -> shutdown", session->id);
h2_session_shutdown(session, arg, msg, 0);
break;
@@ -1844,7 +1802,7 @@ static void h2_session_ev_proto_error(h2_session *session, int arg, const char *
break;
default:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03402)
"h2_session(%ld): proto error -> shutdown", session->id);
h2_session_shutdown(session, arg, msg, 0);
break;
@@ -1870,47 +1828,52 @@ static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
case H2_SESSION_ST_BUSY:
case H2_SESSION_ST_LOCAL_SHUTDOWN:
case H2_SESSION_ST_REMOTE_SHUTDOWN:
- /* nothing for input and output to do. If we remain
- * in this state, we go into a tight loop and suck up
- * CPU cycles. Ideally, we'd like to do a blocking read, but that
- * is not possible if we have scheduled tasks and wait
- * for them to produce something. */
- if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
- }
- if (h2_ihash_is_empty(session->streams)) {
- if (!is_accepting_streams(session)) {
- /* We are no longer accepting new streams and have
- * finished processing existing ones. Time to leave. */
- h2_session_shutdown(session, arg, msg, 0);
- transit(session, "no io", H2_SESSION_ST_DONE);
+ /* Nothing to READ, nothing to WRITE on the master connection.
+ * Possible causes:
+ * - we wait for the client to send us something
+ * - we wait for started tasks to produce output
+ * - we have finished all streams and the client has sent a GOAWAY
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session(%ld): NO_IO event, %d streams open",
+ session->id, session->open_streams);
+ if (session->open_streams > 0) {
+ if (has_unsubmitted_streams(session)
+ || has_suspended_streams(session)) {
+ /* waiting for at least one stream to produce data */
+ transit(session, "no io", H2_SESSION_ST_WAIT);
}
else {
- apr_time_t now = apr_time_now();
- /* When we have no streams, no task event are possible,
- * switch to blocking reads */
- transit(session, "no io", H2_SESSION_ST_IDLE);
- session->idle_until = (session->remote.emitted_count?
- session->s->keep_alive_timeout :
- session->s->timeout) + now;
- session->keep_sync_until = now + apr_time_from_sec(1);
+ /* we have streams open, and all are submitted and none
+ * is suspended. The only thing keeping us from WRITEing
+ * more must be the flow control.
+ * This means we only wait for WINDOW_UPDATE from the
+ * client and can block on READ. */
+ transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE);
+ session->idle_until = apr_time_now() + session->s->timeout;
+ session->keep_sync_until = session->idle_until;
+ /* Make sure we have flushed all previously written output
+ * so that the client will react. */
+ if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ return;
+ }
}
}
- else if (!has_unsubmitted_streams(session)
- && !has_suspended_streams(session)) {
- /* none of our streams is waiting for a response or
- * new output data from task processing,
- * switch to blocking reads. We are probably waiting on
- * window updates. */
- transit(session, "no io", H2_SESSION_ST_IDLE);
- session->idle_until = apr_time_now() + session->s->timeout;
- session->keep_sync_until = session->idle_until;
+ else if (is_accepting_streams(session)) {
+ /* When we have no streams, but still accept new ones, switch to idle */
+ apr_time_t now = apr_time_now();
+ transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE);
+ session->idle_until = (session->remote.emitted_count?
+ session->s->keep_alive_timeout :
+ session->s->timeout) + now;
+ session->keep_sync_until = now + apr_time_from_sec(1);
}
else {
- /* Unable to do blocking reads, as we wait on events from
- * task processing in other threads. Do a busy wait with
- * backoff timer. */
- transit(session, "no io", H2_SESSION_ST_WAIT);
+ /* We are no longer accepting new streams and there are
+ * none left. Time to leave. */
+ h2_session_shutdown(session, arg, msg, 0);
+ transit(session, "no io", H2_SESSION_ST_DONE);
}
break;
default:
@@ -1938,7 +1901,6 @@ static void h2_session_ev_data_read(h2_session *session, int arg, const char *ms
case H2_SESSION_ST_WAIT:
transit(session, "data read", H2_SESSION_ST_BUSY);
break;
- /* fall through */
default:
/* nop */
break;
@@ -1983,6 +1945,37 @@ static void h2_session_ev_pre_close(h2_session *session, int arg, const char *ms
}
}
+static void h2_session_ev_stream_open(h2_session *session, int arg, const char *msg)
+{
+ ++session->open_streams;
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ if (session->open_streams == 1) {
+ /* enter the regular timeout, since we have a stream again */
+ session->idle_until = (session->s->timeout + apr_time_now());
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void h2_session_ev_stream_done(h2_session *session, int arg, const char *msg)
+{
+ --session->open_streams;
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ if (session->open_streams == 0) {
+ /* enter keepalive timeout, since we no longer have streams */
+ session->idle_until = (session->s->keep_alive_timeout
+ + apr_time_now());
+ }
+ break;
+ default:
+ break;
+ }
+}
+
static void dispatch_event(h2_session *session, h2_session_event_t ev,
int arg, const char *msg)
{
@@ -2023,6 +2016,12 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev,
case H2_SESSION_EV_PRE_CLOSE:
h2_session_ev_pre_close(session, arg, msg);
break;
+ case H2_SESSION_EV_STREAM_OPEN:
+ h2_session_ev_stream_open(session, arg, msg);
+ break;
+ case H2_SESSION_EV_STREAM_DONE:
+ h2_session_ev_stream_done(session, arg, msg);
+ break;
default:
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_session(%ld): unknown event %d",
@@ -2037,39 +2036,25 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev,
static const int MAX_WAIT_MICROS = 200 * 1000;
-static void update_child_status(h2_session *session, int status, const char *msg)
-{
- /* Assume that we also change code/msg when something really happened and
- * avoid updating the scoreboard in between */
- if (session->last_status_code != status
- || session->last_status_msg != msg) {
- apr_snprintf(session->status, sizeof(session->status),
- "%s, streams: %d/%d/%d/%d/%d (open/recv/resp/push/rst)",
- msg? msg : "-",
- (int)h2_ihash_count(session->streams),
- (int)session->remote.emitted_count,
- (int)session->responses_submitted,
- (int)session->pushes_submitted,
- (int)session->pushes_reset + session->streams_reset);
- ap_update_child_status_descr(session->c->sbh, status, session->status);
- }
-}
-
apr_status_t h2_session_process(h2_session *session, int async)
{
apr_status_t status = APR_SUCCESS;
conn_rec *c = session->c;
- int rv, have_written, have_read, mpm_state, no_streams;
+ int rv, mpm_state, trace = APLOGctrace3(c);
- ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
- "h2_session(%ld): process start, async=%d", session->id, async);
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): process start, async=%d",
+ session->id, async);
+ }
if (c->cs) {
c->cs->state = CONN_STATE_WRITE_COMPLETION;
}
while (1) {
- have_read = have_written = 0;
+ trace = APLOGctrace3(c);
+ session->have_read = session->have_written = 0;
if (!ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
if (mpm_state == AP_MPMQ_STOPPING) {
@@ -2102,14 +2087,15 @@ apr_status_t h2_session_process(h2_session *session, int async)
break;
case H2_SESSION_ST_IDLE:
- no_streams = h2_ihash_is_empty(session->streams);
- update_child_status(session, (no_streams? SERVER_BUSY_KEEPALIVE
- : SERVER_BUSY_READ), "idle");
- /* make certain, the client receives everything before we idle */
- if (!session->keep_sync_until
- && async && no_streams && !session->r && session->remote.emitted_count) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
- "h2_session(%ld): async idle, nonblock read", session->id);
+ /* make certain, we send everything before we idle */
+ if (!session->keep_sync_until && async && !session->open_streams
+ && !session->r && session->remote.emitted_count) {
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): async idle, nonblock read, "
+ "%d streams open", session->id,
+ session->open_streams);
+ }
/* We do not return to the async mpm immediately, since under
* load, mpms show the tendency to throw keep_alive connections
* away very rapidly.
@@ -2122,7 +2108,7 @@ apr_status_t h2_session_process(h2_session *session, int async)
status = h2_session_read(session, 0);
if (status == APR_SUCCESS) {
- have_read = 1;
+ session->have_read = 1;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
@@ -2136,12 +2122,19 @@ apr_status_t h2_session_process(h2_session *session, int async)
}
else {
ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
+ APLOGNO(03403)
"h2_session(%ld): idle, no data, error",
session->id);
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "timeout");
}
}
else {
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): sync idle, stutter 1-sec, "
+ "%d streams open", session->id,
+ session->open_streams);
+ }
/* We wait in smaller increments, using a 1 second timeout.
* That gives us the chance to check for MPMQ_STOPPING often.
*/
@@ -2153,7 +2146,7 @@ apr_status_t h2_session_process(h2_session *session, int async)
h2_filter_cin_timeout_set(session->cin, apr_time_from_sec(1));
status = h2_session_read(session, 1);
if (status == APR_SUCCESS) {
- have_read = 1;
+ session->have_read = 1;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
else if (status == APR_EAGAIN) {
@@ -2167,11 +2160,26 @@ apr_status_t h2_session_process(h2_session *session, int async)
session->keep_sync_until = 0;
}
if (now > session->idle_until) {
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): keepalive timeout",
+ session->id);
+ }
dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
}
+ else if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): keepalive, %f sec left",
+ session->id, (session->idle_until - now) / 1000000.0f);
+ }
/* continue reading handling */
}
else {
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): idle(1 sec timeout) "
+ "read failed", session->id);
+ }
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error");
}
}
@@ -2186,8 +2194,7 @@ apr_status_t h2_session_process(h2_session *session, int async)
h2_filter_cin_timeout_set(session->cin, session->s->timeout);
status = h2_session_read(session, 0);
if (status == APR_SUCCESS) {
- have_read = 1;
- update_child_status(session, SERVER_BUSY_READ, "busy");
+ session->have_read = 1;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
else if (status == APR_EAGAIN) {
@@ -2202,42 +2209,32 @@ apr_status_t h2_session_process(h2_session *session, int async)
}
}
- if (!h2_ihash_is_empty(session->streams)) {
- /* resume any streams for which data is available again */
- h2_session_resume_streams_with_data(session);
- /* Submit any responses/push_promises that are ready */
- status = h2_session_submit(session);
- if (status == APR_SUCCESS) {
- have_written = 1;
- }
- else if (status != APR_EAGAIN) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_INTERNAL_ERROR, "submit error");
- break;
- }
- /* send out window updates for our inputs */
- status = h2_mplx_in_update_windows(session->mplx);
- if (status != APR_SUCCESS && status != APR_EAGAIN) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_INTERNAL_ERROR, "window update error");
- break;
- }
+ /* trigger window updates, stream resumes and submits */
+ status = h2_mplx_dispatch_master_events(session->mplx,
+ on_stream_resume,
+ on_stream_response,
+ session);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): dispatch error",
+ session->id);
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR,
+ "dispatch error");
+ break;
}
if (nghttp2_session_want_write(session->ngh2)) {
ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
status = h2_session_send(session);
- if (status == APR_SUCCESS) {
- have_written = 1;
- }
- else {
+ if (status != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_INTERNAL_ERROR, "writing");
+ H2_ERR_INTERNAL_ERROR, "writing");
break;
}
}
- if (have_read || have_written) {
+ if (session->have_read || session->have_written) {
if (session->wait_us) {
session->wait_us = 0;
}
@@ -2253,13 +2250,15 @@ apr_status_t h2_session_process(h2_session *session, int async)
session->start_wait = apr_time_now();
if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ break;
}
- update_child_status(session, SERVER_BUSY_READ, "wait");
}
else if ((apr_time_now() - session->start_wait) >= session->s->timeout) {
/* waited long enough */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, APR_TIMEUP, c,
- "h2_session: wait for data");
+ if (trace) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, APR_TIMEUP, c,
+ "h2_session: wait for data");
+ }
dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
break;
}
@@ -2268,8 +2267,8 @@ apr_status_t h2_session_process(h2_session *session, int async)
session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS);
}
- if (APLOGctrace1(c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ if (trace) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
"h2_session: wait for data, %ld micros",
(long)session->wait_us);
}
@@ -2279,13 +2278,18 @@ apr_status_t h2_session_process(h2_session *session, int async)
session->wait_us = 0;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
- else if (status == APR_TIMEUP) {
+ else if (APR_STATUS_IS_TIMEUP(status)) {
/* go back to checking all inputs again */
transit(session, "wait cycle", session->local.accepting?
H2_SESSION_ST_BUSY : H2_SESSION_ST_LOCAL_SHUTDOWN);
}
+ else if (APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_ECONNABORTED(status)) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
else {
ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c,
+ APLOGNO(03404)
"h2_session(%ld): waiting on conditional",
session->id);
h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR,
@@ -2294,7 +2298,6 @@ apr_status_t h2_session_process(h2_session *session, int async)
break;
case H2_SESSION_ST_DONE:
- update_child_status(session, SERVER_CLOSING, "done");
status = APR_EOF;
goto out;
@@ -2317,10 +2320,12 @@ apr_status_t h2_session_process(h2_session *session, int async)
}
out:
- ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
- "h2_session(%ld): [%s] process returns",
- session->id, state_name(session->state));
-
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): [%s] process returns",
+ session->id, state_name(session->state));
+ }
+
if ((session->state != H2_SESSION_ST_DONE)
&& (APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_ECONNRESET(status)
diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
index ea5f82a3..c5c5b7ae 100644
--- a/modules/http2/h2_session.h
+++ b/modules/http2/h2_session.h
@@ -70,6 +70,8 @@ typedef enum {
H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */
H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */
H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */
+ H2_SESSION_EV_STREAM_OPEN, /* stream has been opened */
+ H2_SESSION_EV_STREAM_DONE, /* stream has been handled completely */
} h2_session_event_t;
typedef struct h2_session {
@@ -96,10 +98,13 @@ typedef struct h2_session {
unsigned int reprioritize : 1; /* scheduled streams priority changed */
unsigned int eoc_written : 1; /* h2 eoc bucket written */
unsigned int flush : 1; /* flushing output necessary */
+ unsigned int have_read : 1; /* session has read client data */
+ unsigned int have_written : 1; /* session did write data to client */
apr_interval_time_t wait_us; /* timout during BUSY_WAIT state, micro secs */
struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
+ int open_streams; /* number of streams open */
int unsent_submits; /* number of submitted, but not yet written responses. */
int unsent_promises; /* number of submitted, but not yet written push promised */
@@ -122,8 +127,6 @@ typedef struct h2_session {
apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */
struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */
- apr_pool_t *spare; /* spare stream pool */
-
char status[64]; /* status message for scoreboard */
int last_status_code; /* the one already reported */
const char *last_status_msg; /* the one already reported */
@@ -190,17 +193,19 @@ void h2_session_close(h2_session *session);
apr_status_t h2_session_handle_response(h2_session *session,
struct h2_stream *stream);
-/* Get the h2_stream for the given stream idenrtifier. */
-struct h2_stream *h2_session_get_stream(h2_session *session, int stream_id);
-
/**
* Create and register a new stream under the given id.
*
* @param session the session to register in
* @param stream_id the new stream identifier
+ * @param initiated_on the stream id this one is initiated on or 0
+ * @param req the request for this stream or NULL if not known yet
* @return the new stream
*/
-struct h2_stream *h2_session_open_stream(h2_session *session, int stream_id);
+struct h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
+ int initiated_on,
+ const h2_request *req);
+
/**
* Returns if client settings have push enabled.
@@ -213,8 +218,8 @@ int h2_session_push_enabled(h2_session *session);
* @param session the session to which the stream belongs
* @param stream the stream to destroy
*/
-apr_status_t h2_session_stream_destroy(h2_session *session,
- struct h2_stream *stream);
+apr_status_t h2_session_stream_done(h2_session *session,
+ struct h2_stream *stream);
/**
* Submit a push promise on the stream and schedule the new steam for
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index 0a1dadf9..a7a67641 100644
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -24,6 +24,8 @@
#include <nghttp2/nghttp2.h>
#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
#include "h2_conn.h"
#include "h2_config.h"
#include "h2_h2.h"
@@ -36,7 +38,6 @@
#include "h2_stream.h"
#include "h2_task.h"
#include "h2_ctx.h"
-#include "h2_task_input.h"
#include "h2_task.h"
#include "h2_util.h"
@@ -52,6 +53,20 @@ static int state_transition[][7] = {
/*CL*/{ 1, 1, 0, 0, 1, 1, 1 },
};
+static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, char *tag)
+{
+ if (APLOG_C_IS_LEVEL(s->session->c, lvl)) {
+ conn_rec *c = s->session->c;
+ char buffer[4 * 1024];
+ const char *line = "(null)";
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
+
+ len = h2_util_bb_print(buffer, bmax, tag, "", s->buffer);
+ ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%ld-%d): %s",
+ c->id, s->id, len? buffer : line);
+ }
+}
+
static int set_state(h2_stream *stream, h2_stream_state_t state)
{
int allowed = state_transition[state][stream->state];
@@ -135,37 +150,97 @@ static int output_open(h2_stream *stream)
}
}
-static h2_sos *h2_sos_mplx_create(h2_stream *stream, h2_response *response);
+static apr_status_t stream_pool_cleanup(void *ctx)
+{
+ h2_stream *stream = ctx;
+ apr_status_t status;
+
+ if (stream->input) {
+ h2_beam_destroy(stream->input);
+ stream->input = NULL;
+ }
+ if (stream->files) {
+ apr_file_t *file;
+ int i;
+ for (i = 0; i < stream->files->nelts; ++i) {
+ file = APR_ARRAY_IDX(stream->files, i, apr_file_t*);
+ status = apr_file_close(file);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, stream->session->c,
+ "h2_stream(%ld-%d): destroy, closed file %d",
+ stream->session->id, stream->id, i);
+ }
+ stream->files = NULL;
+ }
+ return APR_SUCCESS;
+}
-h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session)
+h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
+ int initiated_on, const h2_request *creq)
{
+ h2_request *req;
h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
+
stream->id = id;
stream->state = H2_STREAM_ST_IDLE;
stream->pool = pool;
stream->session = session;
set_state(stream, H2_STREAM_ST_OPEN);
- stream->request = h2_request_create(id, pool,
- h2_config_geti(session->config, H2_CONF_SER_HEADERS));
+ if (creq) {
+        /* take it into our pool and ensure correct ids */
+ req = h2_request_clone(pool, creq);
+ req->id = id;
+ req->initiated_on = initiated_on;
+ }
+ else {
+ req = h2_req_create(id, pool,
+ h2_config_geti(session->config, H2_CONF_SER_HEADERS));
+ }
+ stream->request = req;
+
+ apr_pool_cleanup_register(pool, stream, stream_pool_cleanup,
+ apr_pool_cleanup_null);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03082)
"h2_stream(%ld-%d): opened", session->id, stream->id);
return stream;
}
-apr_status_t h2_stream_destroy(h2_stream *stream)
+void h2_stream_cleanup(h2_stream *stream)
+{
+ AP_DEBUG_ASSERT(stream);
+ if (stream->buffer) {
+ apr_brigade_cleanup(stream->buffer);
+ }
+ if (stream->input) {
+ apr_status_t status;
+ status = h2_beam_shutdown(stream->input, APR_NONBLOCK_READ, 1);
+ if (status == APR_EAGAIN) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_stream(%ld-%d): wait on input shutdown",
+ stream->session->id, stream->id);
+ status = h2_beam_shutdown(stream->input, APR_BLOCK_READ, 1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
+ "h2_stream(%ld-%d): input shutdown returned",
+ stream->session->id, stream->id);
+ }
+ }
+}
+
+void h2_stream_destroy(h2_stream *stream)
{
AP_DEBUG_ASSERT(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c,
+ "h2_stream(%ld-%d): destroy",
+ stream->session->id, stream->id);
if (stream->pool) {
apr_pool_destroy(stream->pool);
}
- return APR_SUCCESS;
}
-void h2_stream_cleanup(h2_stream *stream)
+void h2_stream_eos_destroy(h2_stream *stream)
{
- h2_session_stream_destroy(stream->session, stream);
- /* stream is gone */
+ h2_session_stream_done(stream->session, stream);
+ /* stream possibly destroyed */
}
apr_pool_t *h2_stream_detach_pool(h2_stream *stream)
@@ -187,33 +262,7 @@ void h2_stream_rst(h2_stream *stream, int error_code)
struct h2_response *h2_stream_get_response(h2_stream *stream)
{
- return stream->sos? stream->sos->response : NULL;
-}
-
-apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response,
- apr_bucket_brigade *bb)
-{
- apr_status_t status = APR_SUCCESS;
- h2_sos *sos;
-
- if (!output_open(stream)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- "h2_stream(%ld-%d): output closed",
- stream->session->id, stream->id);
- return APR_ECONNRESET;
- }
-
- sos = h2_sos_mplx_create(stream, response);
- if (sos->response->sos_filter) {
- sos = h2_filter_sos_create(sos->response->sos_filter, sos);
- }
- stream->sos = sos;
-
- status = stream->sos->buffer(stream->sos, bb);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
- "h2_stream(%ld-%d): set_response(%d)",
- stream->session->id, stream->id, stream->sos->response->http_status);
- return status;
+ return stream->response;
}
apr_status_t h2_stream_set_request(h2_stream *stream, request_rec *r)
@@ -224,26 +273,57 @@ apr_status_t h2_stream_set_request(h2_stream *stream, request_rec *r)
return APR_ECONNRESET;
}
set_state(stream, H2_STREAM_ST_OPEN);
- status = h2_request_rwrite(stream->request, r);
+ status = h2_request_rwrite(stream->request, stream->pool, r);
stream->request->serialize = h2_config_geti(h2_config_rget(r),
H2_CONF_SER_HEADERS);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03058)
+ "h2_request(%d): rwrite %s host=%s://%s%s",
+ stream->request->id, stream->request->method,
+ stream->request->scheme, stream->request->authority,
+ stream->request->path);
return status;
}
-void h2_stream_set_h2_request(h2_stream *stream, int initiated_on,
- const h2_request *req)
-{
- h2_request_copy(stream->pool, stream->request, req);
- stream->request->initiated_on = initiated_on;
- stream->request->eoh = 0;
-}
-
apr_status_t h2_stream_add_header(h2_stream *stream,
const char *name, size_t nlen,
const char *value, size_t vlen)
{
AP_DEBUG_ASSERT(stream);
+ if (!stream->response) {
+ if (name[0] == ':') {
+ if ((vlen) > stream->session->s->limit_req_line) {
+ /* pseudo header: approximation of request line size check */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): pseudo header %s too long",
+ stream->session->id, stream->id, name);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_URI_TOO_LARGE);
+ }
+ }
+ else if ((nlen + 2 + vlen) > stream->session->s->limit_req_fieldsize) {
+ /* header too long */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): header %s too long",
+ stream->session->id, stream->id, name);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+
+ if (name[0] != ':') {
+ ++stream->request_headers_added;
+ if (stream->request_headers_added
+ > stream->session->s->limit_req_fields) {
+ /* too many header lines */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): too many header lines",
+ stream->session->id, stream->id);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+ }
+ }
+
if (h2_stream_is_scheduled(stream)) {
return h2_request_add_trailer(stream->request, stream->pool,
name, nlen, value, vlen);
@@ -275,21 +355,22 @@ apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
close_input(stream);
}
+ if (stream->response) {
+        /* already have a response, probably an HTTP error code */
+ return h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
+ }
+
/* Seeing the end-of-headers, we have everything we need to
* start processing it.
*/
status = h2_request_end_headers(stream->request, stream->pool,
eos, push_enabled);
if (status == APR_SUCCESS) {
- if (!eos) {
- stream->request->body = 1;
- }
- stream->input_remaining = stream->request->content_length;
-
- status = h2_mplx_process(stream->session->mplx, stream->id,
- stream->request, cmp, ctx);
+ stream->request->body = !eos;
stream->scheduled = 1;
+ stream->input_remaining = stream->request->content_length;
+ status = h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): scheduled %s %s://%s%s",
stream->session->id, stream->id,
@@ -298,7 +379,7 @@ apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
}
else {
h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
"h2_stream(%ld-%d): RST=2 (internal err) %s %s://%s%s",
stream->session->id, stream->id,
stream->request->method, stream->request->scheme,
@@ -326,8 +407,8 @@ apr_status_t h2_stream_close_input(h2_stream *stream)
return APR_ECONNRESET;
}
- if (close_input(stream)) {
- status = h2_mplx_in_close(stream->session->mplx, stream->id);
+ if (close_input(stream) && stream->input) {
+ status = h2_beam_close(stream->input);
}
return status;
}
@@ -335,25 +416,29 @@ apr_status_t h2_stream_close_input(h2_stream *stream)
apr_status_t h2_stream_write_data(h2_stream *stream,
const char *data, size_t len, int eos)
{
+ conn_rec *c = stream->session->c;
apr_status_t status = APR_SUCCESS;
AP_DEBUG_ASSERT(stream);
+ if (!stream->input) {
+ return APR_EOF;
+ }
if (input_closed(stream) || !stream->request->eoh) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_stream(%ld-%d): writing denied, closed=%d, eoh=%d",
stream->session->id, stream->id, input_closed(stream),
stream->request->eoh);
return APR_EINVAL;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_stream(%ld-%d): add %ld input bytes",
stream->session->id, stream->id, (long)len);
if (!stream->request->chunked) {
stream->input_remaining -= len;
if (stream->input_remaining < 0) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
APLOGNO(02961)
"h2_stream(%ld-%d): got %ld more content bytes than announced "
"in content-length header: %ld",
@@ -365,10 +450,18 @@ apr_status_t h2_stream_write_data(h2_stream *stream,
}
}
- status = h2_mplx_in_write(stream->session->mplx, stream->id, data, len, eos);
+ if (!stream->tmp) {
+ stream->tmp = apr_brigade_create(stream->pool, c->bucket_alloc);
+ }
+ apr_brigade_write(stream->tmp, NULL, NULL, data, len);
if (eos) {
+ APR_BRIGADE_INSERT_TAIL(stream->tmp,
+ apr_bucket_eos_create(c->bucket_alloc));
close_input(stream);
}
+
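+    /* hand the collected data (and any EOS) over to the task's input beam */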
+ status = h2_beam_send(stream->input, stream->tmp, APR_BLOCK_READ);
+ apr_brigade_cleanup(stream->tmp);
return status;
}
@@ -387,44 +480,160 @@ int h2_stream_is_suspended(const h2_stream *stream)
return stream->suspended;
}
-apr_status_t h2_stream_out_prepare(h2_stream *stream,
- apr_off_t *plen, int *peos)
+static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
{
- if (stream->rst_error) {
- *plen = 0;
- *peos = 1;
+ conn_rec *c = stream->session->c;
+ apr_bucket *b;
+ apr_status_t status;
+
+ if (!stream->output) {
+ return APR_EOF;
+ }
+ status = h2_beam_receive(stream->output, stream->buffer,
+ APR_NONBLOCK_READ, amount);
+    /* The buckets we receive are allocated from the stream->buffer pool,
+     * which is exactly what we want, since that is stream->pool.
+     *
+     * However: when we send these buckets down the core output filters, a
+     * filter might decide to set them aside into a pool of its own. And it
+     * might decide, after having sent the buckets, to clear its pool.
+     *
+     * This is problematic for file buckets, because clearing the pool closes
+     * the contained file. Any split-off buckets we send afterwards will then
+     * fail with APR_EBADF.
+     */
+ for (b = APR_BRIGADE_FIRST(stream->buffer);
+ b != APR_BRIGADE_SENTINEL(stream->buffer);
+ b = APR_BUCKET_NEXT(b)) {
+ if (APR_BUCKET_IS_FILE(b)) {
+ apr_bucket_file *f = (apr_bucket_file *)b->data;
+ apr_pool_t *fpool = apr_file_pool_get(f->fd);
+ if (fpool != c->pool) {
+ apr_bucket_setaside(b, c->pool);
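+                /* remember the file; stream_pool_cleanup() will close it */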
+ if (!stream->files) {
+ stream->files = apr_array_make(stream->pool,
+ 5, sizeof(apr_file_t*));
+ }
+ APR_ARRAY_PUSH(stream->files, apr_file_t*) = f->fd;
+ }
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response,
+ h2_bucket_beam *output)
+{
+ apr_status_t status = APR_SUCCESS;
+ conn_rec *c = stream->session->c;
+
+ if (!output_open(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_stream(%ld-%d): output closed",
+ stream->session->id, stream->id);
return APR_ECONNRESET;
}
+
+ stream->response = response;
+ stream->output = output;
+ stream->buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
+
+ h2_stream_filter(stream);
+ if (stream->output) {
+ status = fill_buffer(stream, 0);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
+ "h2_stream(%ld-%d): set_response(%d)",
+ stream->session->id, stream->id,
+ stream->response->http_status);
+ return status;
+}
- AP_DEBUG_ASSERT(stream->sos);
- return stream->sos->prepare(stream->sos, plen, peos);
+apr_status_t h2_stream_set_error(h2_stream *stream, int http_status)
+{
+ h2_response *response;
+
+ if (stream->submitted) {
+ return APR_EINVAL;
+ }
+ response = h2_response_die(stream->id, http_status, stream->request,
+ stream->pool);
+ return h2_stream_set_response(stream, response, NULL);
}
-apr_status_t h2_stream_readx(h2_stream *stream,
- h2_io_data_cb *cb, void *ctx,
- apr_off_t *plen, int *peos)
+static const apr_size_t DATA_CHUNK_SIZE = ((16*1024) - 100 - 9);
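+/* i.e. 16 KB minus the 9 byte HTTP/2 frame header and some extra headroom,
+ * presumably for padding/framing overhead. */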
+
+apr_status_t h2_stream_out_prepare(h2_stream *stream,
+ apr_off_t *plen, int *peos)
{
+ conn_rec *c = stream->session->c;
+ apr_status_t status = APR_SUCCESS;
+ apr_off_t requested;
+
if (stream->rst_error) {
+ *plen = 0;
+ *peos = 1;
return APR_ECONNRESET;
}
- if (!stream->sos) {
- return APR_EGENERAL;
+
+ if (*plen > 0) {
+ requested = H2MIN(*plen, DATA_CHUNK_SIZE);
}
- return stream->sos->readx(stream->sos, cb, ctx, plen, peos);
+ else {
+ requested = DATA_CHUNK_SIZE;
+ }
+ *plen = requested;
+
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_pre");
+ h2_util_bb_avail(stream->buffer, plen, peos);
+ if (!*peos && *plen < requested) {
+ /* try to get more data */
+ status = fill_buffer(stream, (requested - *plen) + DATA_CHUNK_SIZE);
+ if (APR_STATUS_IS_EOF(status)) {
+ apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->buffer, eos);
+ status = APR_SUCCESS;
+ }
+ else if (status == APR_EAGAIN) {
+ /* did not receive more, it's ok */
+ status = APR_SUCCESS;
+ }
+ *plen = requested;
+ h2_util_bb_avail(stream->buffer, plen, peos);
+ }
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_post");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
+ "h2_stream(%ld-%d): prepare, len=%ld eos=%d, trailers=%s",
+ c->id, stream->id, (long)*plen, *peos,
+ (stream->response && stream->response->trailers)?
+ "yes" : "no");
+ if (!*peos && !*plen && status == APR_SUCCESS) {
+ return APR_EAGAIN;
+ }
+ return status;
}
+
apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
apr_off_t *plen, int *peos)
{
+ conn_rec *c = stream->session->c;
+ apr_status_t status = APR_SUCCESS;
+
if (stream->rst_error) {
return APR_ECONNRESET;
}
- if (!stream->sos) {
- return APR_EGENERAL;
+ status = h2_append_brigade(bb, stream->buffer, plen, peos);
+ if (status == APR_SUCCESS && !*peos && !*plen) {
+ status = APR_EAGAIN;
}
- return stream->sos->read_to(stream->sos, bb, plen, peos);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+ "h2_stream(%ld-%d): read_to, len=%ld eos=%d",
+ c->id, stream->id, (long)*plen, *peos);
+ return status;
}
+
int h2_stream_input_is_open(const h2_stream *stream)
{
return input_open(stream);
@@ -469,7 +678,7 @@ apr_status_t h2_stream_submit_pushes(h2_stream *stream)
apr_table_t *h2_stream_get_trailers(h2_stream *stream)
{
- return stream->sos? stream->sos->get_trailers(stream->sos) : NULL;
+ return stream->response? stream->response->trailers : NULL;
}
const h2_priority *h2_stream_get_priority(h2_stream *stream)
@@ -486,147 +695,3 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream)
return NULL;
}
-/*******************************************************************************
- * h2_sos_mplx
- ******************************************************************************/
-
-typedef struct h2_sos_mplx {
- h2_mplx *m;
- apr_bucket_brigade *bb;
- apr_bucket_brigade *tmp;
- apr_table_t *trailers;
- apr_off_t buffer_size;
-} h2_sos_mplx;
-
-#define H2_SOS_MPLX_OUT(lvl,msos,msg) \
- do { \
- if (APLOG_C_IS_LEVEL((msos)->m->c,lvl)) \
- h2_util_bb_log((msos)->m->c,(msos)->m->id,lvl,msg,(msos)->bb); \
- } while(0)
-
-
-static apr_status_t mplx_transfer(h2_sos_mplx *msos, int stream_id,
- apr_pool_t *pool)
-{
- apr_status_t status;
- apr_table_t *trailers = NULL;
-
- if (!msos->tmp) {
- msos->tmp = apr_brigade_create(msos->bb->p, msos->bb->bucket_alloc);
- }
- status = h2_mplx_out_get_brigade(msos->m, stream_id, msos->tmp,
- msos->buffer_size-1, &trailers);
- if (!APR_BRIGADE_EMPTY(msos->tmp)) {
- h2_transfer_brigade(msos->bb, msos->tmp, pool);
- }
- if (trailers) {
- msos->trailers = trailers;
- }
- return status;
-}
-
-static apr_status_t h2_sos_mplx_read_to(h2_sos *sos, apr_bucket_brigade *bb,
- apr_off_t *plen, int *peos)
-{
- h2_sos_mplx *msos = sos->ctx;
- apr_status_t status;
-
- status = h2_append_brigade(bb, msos->bb, plen, peos);
- if (status == APR_SUCCESS && !*peos && !*plen) {
- status = APR_EAGAIN;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, msos->m->c,
- "h2_stream(%ld-%d): read_to, len=%ld eos=%d",
- msos->m->id, sos->stream->id, (long)*plen, *peos);
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c,
- "h2_stream(%ld-%d): read_to, len=%ld eos=%d",
- msos->m->id, sos->stream->id, (long)*plen, *peos);
- return status;
-}
-
-static apr_status_t h2_sos_mplx_readx(h2_sos *sos, h2_io_data_cb *cb, void *ctx,
- apr_off_t *plen, int *peos)
-{
- h2_sos_mplx *msos = sos->ctx;
- apr_status_t status = APR_SUCCESS;
-
- status = h2_util_bb_readx(msos->bb, cb, ctx, plen, peos);
- if (status == APR_SUCCESS && !*peos && !*plen) {
- status = APR_EAGAIN;
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, msos->m->c,
- "h2_stream(%ld-%d): readx, len=%ld eos=%d",
- msos->m->id, sos->stream->id, (long)*plen, *peos);
- return status;
-}
-
-static apr_status_t h2_sos_mplx_prepare(h2_sos *sos, apr_off_t *plen, int *peos)
-{
- h2_sos_mplx *msos = sos->ctx;
- apr_status_t status = APR_SUCCESS;
-
- H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx prepare_pre");
-
- if (APR_BRIGADE_EMPTY(msos->bb)) {
- status = mplx_transfer(msos, sos->stream->id, sos->stream->pool);
- }
- h2_util_bb_avail(msos->bb, plen, peos);
-
- H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx prepare_post");
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c,
- "h2_stream(%ld-%d): prepare, len=%ld eos=%d, trailers=%s",
- msos->m->id, sos->stream->id, (long)*plen, *peos,
- msos->trailers? "yes" : "no");
- if (!*peos && !*plen) {
- status = APR_EAGAIN;
- }
-
- return status;
-}
-
-static apr_table_t *h2_sos_mplx_get_trailers(h2_sos *sos)
-{
- h2_sos_mplx *msos = sos->ctx;
-
- return msos->trailers;
-}
-
-static apr_status_t h2_sos_mplx_buffer(h2_sos *sos, apr_bucket_brigade *bb)
-{
- h2_sos_mplx *msos = sos->ctx;
- apr_status_t status = APR_SUCCESS;
-
- if (bb && !APR_BRIGADE_EMPTY(bb)) {
- H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx set_response_pre");
- status = mplx_transfer(msos, sos->stream->id, sos->stream->pool);
- H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx set_response_post");
- }
- return status;
-}
-
-static h2_sos *h2_sos_mplx_create(h2_stream *stream, h2_response *response)
-{
- h2_sos *sos;
- h2_sos_mplx *msos;
-
- msos = apr_pcalloc(stream->pool, sizeof(*msos));
- msos->m = stream->session->mplx;
- msos->bb = apr_brigade_create(stream->pool, msos->m->c->bucket_alloc);
- msos->buffer_size = 32 * 1024;
-
- sos = apr_pcalloc(stream->pool, sizeof(*sos));
- sos->stream = stream;
- sos->response = response;
-
- sos->ctx = msos;
- sos->buffer = h2_sos_mplx_buffer;
- sos->prepare = h2_sos_mplx_prepare;
- sos->readx = h2_sos_mplx_readx;
- sos->read_to = h2_sos_mplx_read_to;
- sos->get_trailers = h2_sos_mplx_get_trailers;
-
- sos->response = response;
-
- return sos;
-}
-
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
index f0cd2167..f80f8115 100644
--- a/modules/http2/h2_stream.h
+++ b/modules/http2/h2_stream.h
@@ -30,7 +30,6 @@
* The h2_response gives the HEADER frames to sent to the client, followed
* by DATA frames read from the h2_stream until EOS is reached.
*/
-#include "h2_io.h"
struct h2_mplx;
struct h2_priority;
@@ -38,6 +37,7 @@ struct h2_request;
struct h2_response;
struct h2_session;
struct h2_sos;
+struct h2_bucket_beam;
typedef struct h2_stream h2_stream;
@@ -48,16 +48,23 @@ struct h2_stream {
apr_pool_t *pool; /* the memory pool for this stream */
struct h2_request *request; /* the request made in this stream */
- int rst_error; /* stream error for RST_STREAM */
+    struct h2_bucket_beam *input;  /* beam carrying request body data to the task */
+    int request_headers_added;     /* number of request headers added */
+    struct h2_response *response;  /* the response, once it is available */
+    struct h2_bucket_beam *output; /* beam delivering response data from the task */
+    apr_bucket_brigade *buffer;    /* response data buffered for sending */
+    apr_bucket_brigade *tmp;       /* scratch brigade for incoming request data */
+    apr_array_header_t *files;     /* apr_file_t* we collected during I/O */
+
+ int rst_error; /* stream error for RST_STREAM */
unsigned int aborted : 1; /* was aborted */
unsigned int suspended : 1; /* DATA sending has been suspended */
unsigned int scheduled : 1; /* stream has been scheduled */
+ unsigned int started : 1; /* stream has started processing */
unsigned int submitted : 1; /* response HEADER has been sent */
apr_off_t input_remaining; /* remaining bytes on input as advertised via content-length */
-
- struct h2_sos *sos; /* stream output source, e.g. to read output from */
apr_off_t data_frames_sent; /* # of DATA frames sent out for this stream */
};
@@ -71,15 +78,18 @@ struct h2_stream {
* @param session the session this stream belongs to
* @return the newly opened stream
*/
-h2_stream *h2_stream_open(int id, apr_pool_t *pool, struct h2_session *session);
+h2_stream *h2_stream_open(int id, apr_pool_t *pool, struct h2_session *session,
+ int initiated_on, const struct h2_request *req);
/**
- * Destroy any resources held by this stream. Will destroy memory pool
- * if still owned by the stream.
- *
- * @param stream the stream to destroy
+ * Clean up any resources still held by the stream; called by the last bucket.
+ */
+void h2_stream_eos_destroy(h2_stream *stream);
+
+/**
+ * Destroy memory pool if still owned by the stream.
*/
-apr_status_t h2_stream_destroy(h2_stream *stream);
+void h2_stream_destroy(h2_stream *stream);
/**
* Removes stream from h2_session and destroys it.
@@ -93,7 +103,7 @@ void h2_stream_cleanup(h2_stream *stream);
* destruction to take the pool with it.
*
* @param stream the stream to detach the pool from
- * @param the detached memmory pool or NULL if stream no longer has one
+ * @result the detached memory pool or NULL if stream no longer has one
*/
apr_pool_t *h2_stream_detach_pool(h2_stream *stream);
@@ -106,15 +116,6 @@ apr_pool_t *h2_stream_detach_pool(h2_stream *stream);
*/
apr_status_t h2_stream_set_request(h2_stream *stream, request_rec *r);
-/**
- * Initialize stream->request with the given h2_request.
- *
- * @param stream the stream to init the request for
- * @param req the request for initializing, will be copied
- */
-void h2_stream_set_h2_request(h2_stream *stream, int initiated_on,
- const struct h2_request *req);
-
/*
* Add a HTTP/2 header (including pseudo headers) or trailer
* to the given stream, depending on stream state.
@@ -152,7 +153,7 @@ apr_status_t h2_stream_write_data(h2_stream *stream,
* @param stream the stream to reset
* @param error_code the HTTP/2 error code
*/
-void h2_stream_rst(h2_stream *streamm, int error_code);
+void h2_stream_rst(h2_stream *stream, int error_code);
/**
* Schedule the stream for execution. All header information must be
@@ -181,13 +182,18 @@ struct h2_response *h2_stream_get_response(h2_stream *stream);
* the stream response has been collected.
*
* @param stream the stream to set the response for
- * @param resonse the response data for the stream
+ * @param response the response data for the stream
* @param bb bucket brigade with output data for the stream. Optional,
* may be incomplete.
*/
apr_status_t h2_stream_set_response(h2_stream *stream,
struct h2_response *response,
- apr_bucket_brigade *bb);
+ struct h2_bucket_beam *output);
+
+/**
+ * Set the HTTP error status as response.
+ */
+apr_status_t h2_stream_set_error(h2_stream *stream, int http_status);
/**
* Do a speculative read on the stream output to determine the
@@ -206,23 +212,6 @@ apr_status_t h2_stream_out_prepare(h2_stream *stream,
apr_off_t *plen, int *peos);
/**
- * Read data from the stream output.
- *
- * @param stream the stream to read from
- * @param cb callback to invoke for byte chunks read. Might be invoked
- * multiple times (with different values) for one read operation.
- * @param ctx context data for callback
- * @param plen (in-/out) max. number of bytes to read and on return actual
- * number of bytes read
- * @param peos (out) != 0 iff end of stream has been reached while reading
- * @return APR_SUCCESS if out information was computed successfully.
- * APR_EAGAIN if not data is available and end of stream has not been
- * reached yet.
- */
-apr_status_t h2_stream_readx(h2_stream *stream, h2_io_data_cb *cb,
- void *ctx, apr_off_t *plen, int *peos);
-
-/**
* Read a maximum number of bytes into the bucket brigade.
*
* @param stream the stream to read from
diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
index dff1bcdd..381d0b1c 100644
--- a/modules/http2/h2_task.c
+++ b/modules/http2/h2_task.c
@@ -33,6 +33,8 @@
#include <scoreboard.h>
#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
#include "h2_conn.h"
#include "h2_config.h"
#include "h2_ctx.h"
@@ -42,12 +44,439 @@
#include "h2_request.h"
#include "h2_session.h"
#include "h2_stream.h"
-#include "h2_task_input.h"
-#include "h2_task_output.h"
#include "h2_task.h"
-#include "h2_ctx.h"
#include "h2_worker.h"
+#include "h2_util.h"
+
+/*******************************************************************************
+ * task input handling
+ ******************************************************************************/
+
+static int input_ser_header(void *ctx, const char *name, const char *value)
+{
+ h2_task *task = ctx;
+ apr_brigade_printf(task->input.bb, NULL, NULL, "%s: %s\r\n", name, value);
+ return 1;
+}
+
+static void make_chunk(h2_task *task, apr_bucket_brigade *bb,
+ apr_bucket *first, apr_uint64_t chunk_len,
+ apr_bucket *tail)
+{
+ /* Surround the buckets [first, tail[ with new buckets carrying the
+ * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
+ * to the end of the brigade. */
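+    /* e.g. a 26 byte chunk is framed as "1a\r\n" <26 bytes> "\r\n" */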
+ char buffer[128];
+ apr_bucket *c;
+ int len;
+
+ len = apr_snprintf(buffer, H2_ALEN(buffer),
+ "%"APR_UINT64_T_HEX_FMT"\r\n", chunk_len);
+ c = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(first, c);
+ c = apr_bucket_heap_create("\r\n", 2, NULL, bb->bucket_alloc);
+ if (tail) {
+ APR_BUCKET_INSERT_BEFORE(tail, c);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(bb, c);
+ }
+}
+
+static apr_status_t input_handle_eos(h2_task *task, request_rec *r,
+ apr_bucket *b)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_brigade *bb = task->input.bb;
+ apr_table_t *t = task->request? task->request->trailers : NULL;
+
+ if (task->input.chunked) {
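+        /* split off the EOS bucket and anything after it, append the
+         * closing chunk (plus trailers, if any), then splice the split back */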
+ task->input.tmp = apr_brigade_split_ex(bb, b, task->input.tmp);
+ if (t && !apr_is_empty_table(t)) {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
+ apr_table_do(input_ser_header, task, t, NULL);
+ status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+ }
+ else {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ }
+ APR_BRIGADE_CONCAT(bb, task->input.tmp);
+ }
+ else if (r && t && !apr_is_empty_table(t)){
+ /* trailers passed in directly. */
+ apr_table_overlap(r->trailers_in, t, APR_OVERLAP_TABLES_SET);
+ }
+ task->input.eos_written = 1;
+ return status;
+}
+
+static apr_status_t input_append_eos(h2_task *task, request_rec *r)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_brigade *bb = task->input.bb;
+ apr_table_t *t = task->request? task->request->trailers : NULL;
+
+ if (task->input.chunked) {
+ if (t && !apr_is_empty_table(t)) {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
+ apr_table_do(input_ser_header, task, t, NULL);
+ status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+ }
+ else {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ }
+ }
+ else if (r && t && !apr_is_empty_table(t)){
+ /* trailers passed in directly. */
+ apr_table_overlap(r->trailers_in, t, APR_OVERLAP_TABLES_SET);
+ }
+ APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(bb->bucket_alloc));
+ task->input.eos_written = 1;
+ return status;
+}
+
+static apr_status_t input_read(h2_task *task, ap_filter_t* f,
+ apr_bucket_brigade* bb, ap_input_mode_t mode,
+ apr_read_type_e block, apr_off_t readbytes)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b, *next, *first_data;
+ apr_off_t bblen = 0;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): read, mode=%d, block=%d, readbytes=%ld",
+ task->id, mode, block, (long)readbytes);
+
+ if (mode == AP_MODE_INIT) {
+ return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
+ }
+
+ if (f->c->aborted || !task->request) {
+ return APR_ECONNABORTED;
+ }
+
+ if (!task->input.bb) {
+ if (!task->input.eos_written) {
+ input_append_eos(task, f->r);
+ return APR_SUCCESS;
+ }
+ return APR_EOF;
+ }
+
+    /* Clean up the brigade from those nasty 0-length non-meta buckets
+     * that apr_brigade_split_line() sometimes produces. */
+ for (b = APR_BRIGADE_FIRST(task->input.bb);
+ b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (b->length == 0 && !APR_BUCKET_IS_METADATA(b)) {
+ apr_bucket_delete(b);
+ }
+ }
+
+ while (APR_BRIGADE_EMPTY(task->input.bb) && !task->input.eos) {
+ /* Get more input data for our request. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task(%s): get more data from mplx, block=%d, "
+ "readbytes=%ld, queued=%ld",
+ task->id, block, (long)readbytes, (long)bblen);
+
+        /* Receive more data from the input beam, if we have one, using
+         * the block mode we were called with. */
+ if (task->input.beam) {
+ status = h2_beam_receive(task->input.beam, task->input.bb, block,
+ H2MIN(readbytes, 32*1024));
+ }
+ else {
+ status = APR_EOF;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_task(%s): read returned", task->id);
+ if (APR_STATUS_IS_EAGAIN(status)
+ && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
+ /* chunked input handling does not seem to like it if we
+ * return with APR_EAGAIN from a GETLINE read...
+ * upload 100k test on test-ser.example.org hangs */
+ status = APR_SUCCESS;
+ }
+ else if (APR_STATUS_IS_EOF(status)) {
+ task->input.eos = 1;
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ /* Inspect the buckets received, detect EOS and apply
+ * chunked encoding if necessary */
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "input.beam recv raw", task->input.bb);
+ first_data = NULL;
+ bblen = 0;
+ for (b = APR_BRIGADE_FIRST(task->input.bb);
+ b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (first_data && task->input.chunked) {
+ make_chunk(task, task->input.bb, first_data, bblen, b);
+ first_data = NULL;
+ bblen = 0;
+ }
+ if (APR_BUCKET_IS_EOS(b)) {
+ task->input.eos = 1;
+ input_handle_eos(task, f->r, b);
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "input.bb after handle eos",
+ task->input.bb);
+ }
+ }
+ else if (b->length == 0) {
+ apr_bucket_delete(b);
+ }
+ else {
+ if (!first_data) {
+ first_data = b;
+ }
+ bblen += b->length;
+ }
+ }
+ if (first_data && task->input.chunked) {
+ make_chunk(task, task->input.bb, first_data, bblen, NULL);
+ }
+
+ if (h2_task_logio_add_bytes_in) {
+ h2_task_logio_add_bytes_in(f->c, bblen);
+ }
+ }
+
+ if (task->input.eos) {
+ if (!task->input.eos_written) {
+ input_append_eos(task, f->r);
+ }
+ if (APR_BRIGADE_EMPTY(task->input.bb)) {
+ return APR_EOF;
+ }
+ }
+
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "task_input.bb", task->input.bb);
+
+ if (APR_BRIGADE_EMPTY(task->input.bb)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): no data", task->id);
+ return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, task->input.bb);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, task->input.bb, readbytes);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, task->input.bb, readbytes);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+        /* we are reading a single LF line, e.g. the HTTP headers.
+         * this has the nasty side effect of splitting the bucket, even
+         * though it ends with CRLF, and of creating a 0-length bucket */
+ status = apr_brigade_split_line(bb, task->input.bb, block,
+ HUGE_STRING_LEN);
+ if (APLOGctrace1(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task(%s): getline: %s",
+ task->id, buffer);
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(02942)
+ "h2_task, unsupported READ mode %d", mode);
+ status = APR_ENOTIMPL;
+ }
+
+ if (APLOGctrace1(f->c)) {
+ apr_brigade_length(bb, 0, &bblen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task(%s): return %ld data bytes",
+ task->id, (long)bblen);
+ }
+ return status;
+}
+
+/*******************************************************************************
+ * task output handling
+ ******************************************************************************/
+
+static apr_status_t open_response(h2_task *task)
+{
+ h2_response *response;
+ response = h2_from_h1_get_response(task->output.from_h1);
+ if (!response) {
+ /* This happens currently when ap_die(status, r) is invoked
+ * by a read request filter. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03204)
+ "h2_task(%s): write without response for %s %s %s",
+ task->id,
+ task->request->method,
+ task->request->authority,
+ task->request->path);
+ task->c->aborted = 1;
+ return APR_ECONNABORTED;
+ }
+
+ if (h2_task_logio_add_bytes_out) {
+ /* count headers as if we'd do a HTTP/1.1 serialization */
+ task->output.written = h2_util_table_bytes(response->headers, 3)+1;
+ h2_task_logio_add_bytes_out(task->c, task->output.written);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03348)
+ "h2_task(%s): open response to %s %s %s",
+ task->id, task->request->method,
+ task->request->authority,
+ task->request->path);
+ return h2_mplx_out_open(task->mplx, task->stream_id, response);
+}
+
+static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb)
+{
+ apr_off_t written, left;
+ apr_status_t status;
+ apr_brigade_length(bb, 0, &written);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): write response body (%ld bytes)",
+ task->id, (long)written);
+
+ status = h2_beam_send(task->output.beam, bb,
+ task->blocking? APR_BLOCK_READ
+ : APR_NONBLOCK_READ);
+ if (APR_STATUS_IS_EAGAIN(status)) {
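+        /* only part of the brigade was accepted; count what actually left */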
+ apr_brigade_length(bb, 0, &left);
+ written -= left;
+ status = APR_SUCCESS;
+ }
+ if (status == APR_SUCCESS) {
+ task->output.written += written;
+ if (h2_task_logio_add_bytes_out) {
+ h2_task_logio_add_bytes_out(task->c, written);
+ }
+ }
+ return status;
+}
+
+/* Bring the data from the brigade (which represents the result of the
+ * request_rec out filter chain) into the h2_mplx for further sending
+ * on the master connection.
+ */
+static apr_status_t output_write(h2_task *task, ap_filter_t* f,
+ apr_bucket_brigade* bb)
+{
+ apr_bucket *b;
+ apr_status_t status = APR_SUCCESS;
+ int flush = 0;
+
+ if (APR_BRIGADE_EMPTY(bb)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): empty write", task->id);
+ return APR_SUCCESS;
+ }
+
+ if (task->frozen) {
+ h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
+ "frozen task output write, ignored", bb);
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (AP_BUCKET_IS_EOR(b)) {
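+                /* keep the end-of-request bucket; h2_task_destroy() releases it */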
+ APR_BUCKET_REMOVE(b);
+ task->eor = b;
+ }
+ else {
+ apr_bucket_delete(b);
+ }
+ }
+ return APR_SUCCESS;
+ }
+
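+    /* create the output beam lazily, on the first write */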
+ if (!task->output.beam) {
+ h2_beam_create(&task->output.beam, task->pool,
+ task->stream_id, "output", 0);
+ }
+
+ /* Attempt to write saved brigade first */
+ if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) {
+ status = send_out(task, task->output.bb);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ /* If there is nothing saved (anymore), try to write the brigade passed */
+ if ((!task->output.bb || APR_BRIGADE_EMPTY(task->output.bb))
+ && !APR_BRIGADE_EMPTY(bb)) {
+ /* check if we have a flush before the end-of-request */
+ if (!task->output.response_open) {
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b)) {
+ if (AP_BUCKET_IS_EOR(b)) {
+ break;
+ }
+ else if (APR_BUCKET_IS_FLUSH(b)) {
+ flush = 1;
+ }
+ }
+ }
+
+ status = send_out(task, bb);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ /* If the passed brigade is not empty, save it before return */
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03405)
+ "h2_task(%s): could not write all, saving brigade",
+ task->id);
+ if (!task->output.bb) {
+ task->output.bb = apr_brigade_create(task->pool,
+ task->c->bucket_alloc);
+ }
+ return ap_save_brigade(f, &task->output.bb, &bb, task->pool);
+ }
+
+ if (!task->output.response_open
+ && (flush || h2_beam_get_mem_used(task->output.beam) > (32*1024))) {
+ /* if we have enough buffered or we got a flush bucket, open
+ * the response now. */
+ status = open_response(task);
+ task->output.response_open = 1;
+ }
+
+ return status;
+}
+
+static apr_status_t output_finish(h2_task *task)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (!task->output.response_open) {
+ status = open_response(task);
+ task->output.response_open = 1;
+ }
+ return status;
+}
+
+/*******************************************************************************
+ * task slave connection filters
+ ******************************************************************************/
static apr_status_t h2_filter_stream_input(ap_filter_t* filter,
apr_bucket_brigade* brigade,
@@ -57,11 +486,7 @@ static apr_status_t h2_filter_stream_input(ap_filter_t* filter,
{
h2_task *task = h2_ctx_cget_task(filter->c);
AP_DEBUG_ASSERT(task);
- if (!task->input) {
- return APR_ECONNABORTED;
- }
- return h2_task_input_read(task->input, filter, brigade,
- mode, block, readbytes);
+ return input_read(task, filter, brigade, mode, block, readbytes);
}
static apr_status_t h2_filter_stream_output(ap_filter_t* filter,
@@ -69,10 +494,7 @@ static apr_status_t h2_filter_stream_output(ap_filter_t* filter,
{
h2_task *task = h2_ctx_cget_task(filter->c);
AP_DEBUG_ASSERT(task);
- if (!task->output) {
- return APR_ECONNABORTED;
- }
- return h2_task_output_write(task->output, filter, brigade);
+ return output_write(task, filter, brigade);
}
static apr_status_t h2_filter_read_response(ap_filter_t* filter,
@@ -80,10 +502,60 @@ static apr_status_t h2_filter_read_response(ap_filter_t* filter,
{
h2_task *task = h2_ctx_cget_task(filter->c);
AP_DEBUG_ASSERT(task);
- if (!task->output || !task->output->from_h1) {
+ if (!task->output.from_h1) {
return APR_ECONNABORTED;
}
- return h2_from_h1_read_response(task->output->from_h1, filter, bb);
+ return h2_from_h1_read_response(task->output.from_h1, filter, bb);
+}
+
+/*******************************************************************************
+ * task things
+ ******************************************************************************/
+
+void h2_task_set_response(h2_task *task, h2_response *response)
+{
+ AP_DEBUG_ASSERT(response);
+ AP_DEBUG_ASSERT(!task->response);
+    /* we used to clone the response into our own pool. But
+ * we have much tighter control over the EOR bucket nowadays,
+ * so just use the instance given */
+ task->response = response;
+ if (response->rst_error) {
+ h2_task_rst(task, response->rst_error);
+ }
+}
+
+
+int h2_task_can_redo(h2_task *task)
+{
+ if (task->submitted
+ || (task->input.beam && h2_beam_was_received(task->input.beam))
+ || !task->request) {
+ /* cannot repeat that. */
+ return 0;
+ }
+ return (!strcmp("GET", task->request->method)
+ || !strcmp("HEAD", task->request->method)
+ || !strcmp("OPTIONS", task->request->method));
+}
+
+void h2_task_redo(h2_task *task)
+{
+ task->response = NULL;
+ task->rst_error = 0;
+}
+
+void h2_task_rst(h2_task *task, int error)
+{
+ task->rst_error = error;
+ if (task->input.beam) {
+ h2_beam_abort(task->input.beam);
+ }
+ if (task->output.beam) {
+ h2_beam_abort(task->output.beam);
+ }
+ if (task->c) {
+ task->c->aborted = 1;
+ }
}
/*******************************************************************************
@@ -153,8 +625,8 @@ static int h2_task_pre_conn(conn_rec* c, void *arg)
return OK;
}
-h2_task *h2_task_create(long session_id, const h2_request *req,
- conn_rec *c, h2_mplx *mplx)
+h2_task *h2_task_create(conn_rec *c, const h2_request *req,
+ h2_bucket_beam *input, h2_mplx *mplx)
{
apr_pool_t *pool;
h2_task *task;
@@ -164,21 +636,22 @@ h2_task *h2_task_create(long session_id, const h2_request *req,
if (task == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, c,
APLOGNO(02941) "h2_task(%ld-%d): create stream task",
- session_id, req->id);
- h2_mplx_out_close(mplx, req->id);
+ c->id, req->id);
return NULL;
}
- task->id = apr_psprintf(pool, "%ld-%d", session_id, req->id);
+ task->id = apr_psprintf(pool, "%ld-%d", c->id, req->id);
task->stream_id = req->id;
task->c = c;
task->mplx = mplx;
task->c->keepalives = mplx->c->keepalives;
task->pool = pool;
task->request = req;
- task->input_eos = !req->body;
task->ser_headers = req->serialize;
task->blocking = 1;
+ task->input.beam = input;
+
+ apr_thread_cond_create(&task->cond, pool);
h2_ctx_create_for(c, task);
return task;
@@ -186,6 +659,13 @@ h2_task *h2_task_create(long session_id, const h2_request *req,
void h2_task_destroy(h2_task *task)
{
+ if (task->output.beam) {
+ h2_beam_destroy(task->output.beam);
+ task->output.beam = NULL;
+ }
+ if (task->eor) {
+ apr_bucket_destroy(task->eor);
+ }
if (task->pool) {
apr_pool_destroy(task->pool);
}
@@ -196,17 +676,41 @@ void h2_task_set_io_blocking(h2_task *task, int blocking)
task->blocking = blocking;
}
-apr_status_t h2_task_do(h2_task *task, apr_thread_cond_t *cond)
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread)
{
- apr_status_t status;
-
AP_DEBUG_ASSERT(task);
- task->io = cond;
- task->input = h2_task_input_create(task, task->c);
- task->output = h2_task_output_create(task, task->c);
+
+ task->input.block = APR_BLOCK_READ;
+ task->input.chunked = task->request->chunked;
+ task->input.eos = !task->request->body;
+ if (task->input.eos && !task->input.chunked && !task->ser_headers) {
+ /* We do not serialize/chunk and have eos already, no need to
+ * create a bucket brigade. */
+ task->input.bb = NULL;
+ task->input.eos_written = 1;
+ }
+ else {
+ task->input.bb = apr_brigade_create(task->pool, task->c->bucket_alloc);
+ if (task->ser_headers) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): serialize request %s %s",
+ task->id, task->request->method, task->request->path);
+ apr_brigade_printf(task->input.bb, NULL,
+ NULL, "%s %s HTTP/1.1\r\n",
+ task->request->method, task->request->path);
+ apr_table_do(input_ser_header, task, task->request->headers, NULL);
+ apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
+ }
+ if (task->input.eos) {
+ input_append_eos(task, NULL);
+ }
+ }
+
+ task->output.from_h1 = h2_from_h1_create(task->stream_id, task->pool);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_task(%s): process connection", task->id);
+ task->c->current_thread = thread;
ap_run_process_connection(task->c);
if (task->frozen) {
@@ -214,15 +718,13 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_cond_t *cond)
"h2_task(%s): process_conn returned frozen task",
task->id);
/* cleanup delayed */
- status = APR_EAGAIN;
+ return APR_EAGAIN;
}
else {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_task(%s): processing done", task->id);
- status = APR_SUCCESS;
+ return output_finish(task);
}
-
- return status;
}
static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
@@ -235,7 +737,7 @@ static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
"h2_task(%s): create request_rec", task->id);
r = h2_request_create_rec(req, c);
if (r && (r->status == HTTP_OK)) {
- ap_update_child_status(c->sbh, SERVER_BUSY_READ, r);
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
if (cs) {
cs->state = CONN_STATE_HANDLER;
@@ -304,7 +806,7 @@ apr_status_t h2_task_freeze(h2_task *task)
{
if (!task->frozen) {
task->frozen = 1;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406)
"h2_task(%s), frozen", task->id);
}
return APR_SUCCESS;
@@ -314,7 +816,7 @@ apr_status_t h2_task_thaw(h2_task *task)
{
if (task->frozen) {
task->frozen = 0;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407)
"h2_task(%s), thawed", task->id);
}
task->detached = 1;
diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
index 15a1d3cb..010005a3 100644
--- a/modules/http2/h2_task.h
+++ b/modules/http2/h2_task.h
@@ -38,12 +38,13 @@
*/
struct apr_thread_cond_t;
+struct h2_bucket_beam;
struct h2_conn;
struct h2_mplx;
struct h2_task;
struct h2_req_engine;
struct h2_request;
-struct h2_resp_head;
+struct h2_response;
struct h2_worker;
typedef struct h2_task h2_task;
@@ -52,32 +53,66 @@ struct h2_task {
const char *id;
int stream_id;
conn_rec *c;
- struct h2_mplx *mplx;
apr_pool_t *pool;
+
const struct h2_request *request;
+ struct h2_response *response;
+
+    struct {
+        struct h2_bucket_beam *beam;  /* beam delivering the request body */
+        apr_bucket_brigade *bb;       /* input buffered for the request filters */
+        apr_bucket_brigade *tmp;      /* scratch brigade for chunk framing */
+        apr_read_type_e block;
+        unsigned int chunked : 1;     /* body is passed on in chunked encoding */
+        unsigned int eos : 1;         /* end of input has been seen */
+        unsigned int eos_written : 1; /* EOS bucket has been appended to bb */
+    } input;
+    struct {
+        struct h2_bucket_beam *beam;  /* beam passing response data to the stream */
+        struct h2_from_h1 *from_h1;   /* HTTP/1 response conversion */
+        unsigned int response_open : 1; /* response has been opened on the mplx */
+        apr_off_t written;            /* bytes sent into the output beam */
+        apr_bucket_brigade *bb;       /* data saved for a later send attempt */
+    } output;
+
+ struct h2_mplx *mplx;
+ struct apr_thread_cond_t *cond;
- unsigned int filters_set : 1;
- unsigned int input_eos : 1;
- unsigned int ser_headers : 1;
- unsigned int frozen : 1;
- unsigned int blocking : 1;
- unsigned int detached : 1;
+ int rst_error; /* h2 related stream abort error */
+ unsigned int filters_set : 1;
+ unsigned int ser_headers : 1;
+ unsigned int frozen : 1;
+ unsigned int blocking : 1;
+ unsigned int detached : 1;
+ unsigned int submitted : 1; /* response has been submitted to client */
+ unsigned int worker_started : 1; /* h2_worker started processing for this io */
+ unsigned int worker_done : 1; /* h2_worker finished for this io */
- struct h2_task_input *input;
- struct h2_task_output *output;
- struct apr_thread_cond_t *io; /* used to wait for events on */
+ apr_time_t started_at; /* when processing started */
+ apr_time_t done_at; /* when processing was done */
+ apr_bucket *eor;
struct h2_req_engine *engine; /* engine hosted by this task */
struct h2_req_engine *assigned; /* engine that task has been assigned to */
request_rec *r; /* request being processed in this task */
};
-h2_task *h2_task_create(long session_id, const struct h2_request *req,
- conn_rec *c, struct h2_mplx *mplx);
+h2_task *h2_task_create(conn_rec *c, const struct h2_request *req,
+ struct h2_bucket_beam *input, struct h2_mplx *mplx);
void h2_task_destroy(h2_task *task);
-apr_status_t h2_task_do(h2_task *task, struct apr_thread_cond_t *cond);
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread);
+
+void h2_task_set_response(h2_task *task, struct h2_response *response);
+
+void h2_task_redo(h2_task *task);
+int h2_task_can_redo(h2_task *task);
+
+/**
+ * Reset the task with the given error code; resets all input/output.
+ */
+void h2_task_rst(h2_task *task, int error);
void h2_task_register_hooks(void);
/*
diff --git a/modules/http2/h2_task_input.c b/modules/http2/h2_task_input.c
deleted file mode 100644
index 3993b6b4..00000000
--- a/modules/http2/h2_task_input.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_log.h>
-#include <http_connection.h>
-
-#include "h2_private.h"
-#include "h2_conn.h"
-#include "h2_mplx.h"
-#include "h2_request.h"
-#include "h2_session.h"
-#include "h2_stream.h"
-#include "h2_task_input.h"
-#include "h2_task.h"
-#include "h2_util.h"
-
-
-static int is_aborted(ap_filter_t *f)
-{
- return (f->c->aborted);
-}
-
-static int ser_header(void *ctx, const char *name, const char *value)
-{
- h2_task_input *input = (h2_task_input*)ctx;
- apr_brigade_printf(input->bb, NULL, NULL, "%s: %s\r\n", name, value);
- return 1;
-}
-
-h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c)
-{
- h2_task_input *input = apr_pcalloc(task->pool, sizeof(h2_task_input));
- if (input) {
- input->task = task;
- input->bb = NULL;
- input->block = APR_BLOCK_READ;
-
- if (task->ser_headers) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task_input(%s): serialize request %s %s",
- task->id, task->request->method, task->request->path);
- input->bb = apr_brigade_create(task->pool, c->bucket_alloc);
- apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n",
- task->request->method, task->request->path);
- apr_table_do(ser_header, input, task->request->headers, NULL);
- apr_brigade_puts(input->bb, NULL, NULL, "\r\n");
- if (input->task->input_eos) {
- APR_BRIGADE_INSERT_TAIL(input->bb, apr_bucket_eos_create(c->bucket_alloc));
- }
- }
- else if (!input->task->input_eos) {
- input->bb = apr_brigade_create(task->pool, c->bucket_alloc);
- }
- else {
- /* We do not serialize and have eos already, no need to
- * create a bucket brigade. */
- }
- }
- return input;
-}
-
-void h2_task_input_block_set(h2_task_input *input, apr_read_type_e block)
-{
- input->block = block;
-}
-
-apr_status_t h2_task_input_read(h2_task_input *input,
- ap_filter_t* f,
- apr_bucket_brigade* bb,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes)
-{
- apr_status_t status = APR_SUCCESS;
- apr_off_t bblen = 0;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task_input(%s): read, block=%d, mode=%d, readbytes=%ld",
- input->task->id, block, mode, (long)readbytes);
-
- if (mode == AP_MODE_INIT) {
- return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
- }
-
- if (is_aborted(f)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task_input(%s): is aborted", input->task->id);
- return APR_ECONNABORTED;
- }
-
- if (input->bb) {
- status = apr_brigade_length(input->bb, 1, &bblen);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, f->c,
- APLOGNO(02958) "h2_task_input(%s): brigade length fail",
- input->task->id);
- return status;
- }
- }
-
- if ((bblen == 0) && input->task->input_eos) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task_input(%s): eos", input->task->id);
- return APR_EOF;
- }
-
- while (bblen == 0) {
- /* Get more data for our stream from mplx.
- */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task_input(%s): get more data from mplx, block=%d, "
- "readbytes=%ld, queued=%ld",
- input->task->id, block,
- (long)readbytes, (long)bblen);
-
- /* Override the block mode we get called with depending on the input's
- * setting.
- */
- status = h2_mplx_in_read(input->task->mplx, block,
- input->task->stream_id, input->bb,
- f->r? f->r->trailers_in : NULL,
- input->task->io);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task_input(%s): mplx in read returned",
- input->task->id);
- if (APR_STATUS_IS_EAGAIN(status)
- && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
- /* chunked input handling does not seem to like it if we
- * return with APR_EAGAIN from a GETLINE read...
- * upload 100k test on test-ser.example.org hangs */
- status = APR_SUCCESS;
- }
- else if (status != APR_SUCCESS) {
- return status;
- }
-
- status = apr_brigade_length(input->bb, 1, &bblen);
- if (status != APR_SUCCESS) {
- return status;
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task_input(%s): mplx in read, %ld bytes in brigade",
- input->task->id, (long)bblen);
- if (h2_task_logio_add_bytes_in) {
- h2_task_logio_add_bytes_in(f->c, bblen);
- }
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task_input(%s): read, mode=%d, block=%d, "
- "readbytes=%ld, queued=%ld",
- input->task->id, mode, block,
- (long)readbytes, (long)bblen);
-
- if (!APR_BRIGADE_EMPTY(input->bb)) {
- if (mode == AP_MODE_EXHAUSTIVE) {
- /* return all we have */
- status = h2_util_move(bb, input->bb, readbytes, NULL,
- "task_input_read(exhaustive)");
- }
- else if (mode == AP_MODE_READBYTES) {
- status = h2_util_move(bb, input->bb, readbytes, NULL,
- "task_input_read(readbytes)");
- }
- else if (mode == AP_MODE_SPECULATIVE) {
- /* return not more than was asked for */
- status = h2_util_copy(bb, input->bb, readbytes,
- "task_input_read(speculative)");
- }
- else if (mode == AP_MODE_GETLINE) {
- /* we are reading a single LF line, e.g. the HTTP headers */
- status = apr_brigade_split_line(bb, input->bb, block,
- HUGE_STRING_LEN);
- if (APLOGctrace1(f->c)) {
- char buffer[1024];
- apr_size_t len = sizeof(buffer)-1;
- apr_brigade_flatten(bb, buffer, &len);
- buffer[len] = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task_input(%s): getline: %s",
- input->task->id, buffer);
- }
- }
- else {
- /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
- * to support it. Seems to work. */
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
- APLOGNO(02942)
- "h2_task_input, unsupported READ mode %d", mode);
- status = APR_ENOTIMPL;
- }
-
- if (APLOGctrace1(f->c)) {
- apr_brigade_length(bb, 0, &bblen);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task_input(%s): return %ld data bytes",
- input->task->id, (long)bblen);
- }
- return status;
- }
-
- if (is_aborted(f)) {
- return APR_ECONNABORTED;
- }
-
- status = (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task_input(%s): no data", input->task->id);
- return status;
-}
-
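
The deleted h2_task_input.c above re-serialized an HTTP/2 request into an HTTP/1.1 request head (request line, header lines, blank line) whenever task->ser_headers was set. The following standalone sketch shows that serialization in plain C with snprintf instead of an APR brigade; the header type and function names are hypothetical.

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical header pair, standing in for one entry of an apr_table_t. */
    typedef struct {
        const char *name;
        const char *value;
    } header;

    /* Write "METHOD path HTTP/1.1\r\n", the header lines and the empty line
     * into buf, mirroring what h2_task_input_create() printed into its
     * brigade when task->ser_headers was set. Returns the bytes written. */
    static size_t serialize_request(char *buf, size_t max,
                                    const char *method, const char *path,
                                    const header *hdrs, size_t nhdrs)
    {
        size_t off = 0, i;

        off += snprintf(buf + off, max - off, "%s %s HTTP/1.1\r\n", method, path);
        for (i = 0; i < nhdrs && off < max; ++i) {
            off += snprintf(buf + off, max - off, "%s: %s\r\n",
                            hdrs[i].name, hdrs[i].value);
        }
        if (off < max) {
            off += snprintf(buf + off, max - off, "\r\n");
        }
        return off < max ? off : max - 1;
    }

    int main(void)
    {
        header hdrs[] = { { "Host", "example.org" }, { "Accept", "*/*" } };
        char buf[512];

        serialize_request(buf, sizeof(buf), "GET", "/index.html", hdrs, 2);
        fputs(buf, stdout);
        return 0;
    }
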
diff --git a/modules/http2/h2_task_input.h b/modules/http2/h2_task_input.h
deleted file mode 100644
index c8913cac..00000000
--- a/modules/http2/h2_task_input.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_task_input__
-#define __mod_h2__h2_task_input__
-
-/* h2_task_input places the HEADER+DATA, formatted in HTTP/1.1, into
- * a bucket brigade. The brigade is setup as the input brigade for our
- * pseudo httpd conn_rec that is handling a specific h2_task.
- */
-struct apr_thread_cond_t;
-struct h2_mplx;
-struct h2_task;
-
-typedef struct h2_task_input h2_task_input;
-struct h2_task_input {
- struct h2_task *task;
- apr_bucket_brigade *bb;
- apr_read_type_e block;
-};
-
-
-h2_task_input *h2_task_input_create(struct h2_task *task, conn_rec *c);
-
-apr_status_t h2_task_input_read(h2_task_input *input,
- ap_filter_t* filter,
- apr_bucket_brigade* brigade,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes);
-
-void h2_task_input_block_set(h2_task_input *input, apr_read_type_e block);
-
-#endif /* defined(__mod_h2__h2_task_input__) */
diff --git a/modules/http2/h2_task_output.c b/modules/http2/h2_task_output.c
deleted file mode 100644
index 80938d1f..00000000
--- a/modules/http2/h2_task_output.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-
-#include <apr_thread_cond.h>
-#include <httpd.h>
-#include <http_core.h>
-#include <http_log.h>
-#include <http_connection.h>
-#include <http_request.h>
-
-#include "h2_private.h"
-#include "h2_conn.h"
-#include "h2_mplx.h"
-#include "h2_request.h"
-#include "h2_session.h"
-#include "h2_stream.h"
-#include "h2_from_h1.h"
-#include "h2_response.h"
-#include "h2_task_output.h"
-#include "h2_task.h"
-#include "h2_util.h"
-
-
-h2_task_output *h2_task_output_create(h2_task *task, conn_rec *c)
-{
- h2_task_output *output = apr_pcalloc(task->pool, sizeof(h2_task_output));
- if (output) {
- output->task = task;
- output->from_h1 = h2_from_h1_create(task->stream_id, task->pool);
- }
- return output;
-}
-
-static apr_status_t open_response(h2_task_output *output, ap_filter_t *f,
- apr_bucket_brigade *bb, const char *caller)
-{
- h2_response *response;
- response = h2_from_h1_get_response(output->from_h1);
- if (!response) {
- if (f) {
- /* This happens currently when ap_die(status, r) is invoked
- * by a read request filter. */
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, output->task->c, APLOGNO(03204)
- "h2_task_output(%s): write without response by %s "
- "for %s %s %s",
- output->task->id, caller,
- output->task->request->method,
- output->task->request->authority,
- output->task->request->path);
- output->task->c->aborted = 1;
- }
- if (output->task->io) {
- apr_thread_cond_broadcast(output->task->io);
- }
- return APR_ECONNABORTED;
- }
-
- if (h2_task_logio_add_bytes_out) {
- /* count headers as if we'd do a HTTP/1.1 serialization */
- output->written = h2_util_table_bytes(response->headers, 3)+1;
- h2_task_logio_add_bytes_out(output->task->c, output->written);
- }
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->task->c, APLOGNO(03348)
- "h2_task(%s): open response to %s %s %s",
- output->task->id, output->task->request->method,
- output->task->request->authority,
- output->task->request->path);
- return h2_mplx_out_open(output->task->mplx, output->task->stream_id,
- response, f, bb, output->task->io);
-}
-
-static apr_status_t write_brigade_raw(h2_task_output *output,
- ap_filter_t* f, apr_bucket_brigade* bb)
-{
- apr_off_t written, left;
- apr_status_t status;
-
- apr_brigade_length(bb, 0, &written);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c,
- "h2_task(%s): write response body (%ld bytes)",
- output->task->id, (long)written);
-
- status = h2_mplx_out_write(output->task->mplx, output->task->stream_id,
- f, output->task->blocking, bb, output->task->io);
- if (status == APR_INCOMPLETE) {
- apr_brigade_length(bb, 0, &left);
- written -= left;
- status = APR_SUCCESS;
- }
-
- if (status == APR_SUCCESS) {
- output->written += written;
- if (h2_task_logio_add_bytes_out) {
- h2_task_logio_add_bytes_out(output->task->c, written);
- }
- }
- return status;
-}
-
-/* Bring the data from the brigade (which represents the result of the
- * request_rec out filter chain) into the h2_mplx for further sending
- * on the master connection.
- */
-apr_status_t h2_task_output_write(h2_task_output *output,
- ap_filter_t* f, apr_bucket_brigade* bb)
-{
- apr_bucket *b;
- apr_status_t status = APR_SUCCESS;
-
- if (APR_BRIGADE_EMPTY(bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c,
- "h2_task(%s): empty write", output->task->id);
- return APR_SUCCESS;
- }
-
- if (output->task->frozen) {
- h2_util_bb_log(output->task->c, output->task->stream_id, APLOG_TRACE2,
- "frozen task output write, ignored", bb);
- while (!APR_BRIGADE_EMPTY(bb)) {
- b = APR_BRIGADE_FIRST(bb);
- if (AP_BUCKET_IS_EOR(b)) {
- /* TODO: keep it */
- APR_BUCKET_REMOVE(b);
- }
- else {
- apr_bucket_delete(b);
- }
- }
- return APR_SUCCESS;
- }
-
- if (!output->response_open) {
- status = open_response(output, f, bb, "write");
- output->response_open = 1;
- }
-
- /* Attempt to write saved brigade first */
- if (status == APR_SUCCESS && output->bb && !APR_BRIGADE_EMPTY(output->bb)) {
- status = write_brigade_raw(output, f, output->bb);
- }
-
- /* If there is nothing saved (anymore), try to write the brigade passed */
- if (status == APR_SUCCESS
- && (!output->bb || APR_BRIGADE_EMPTY(output->bb))
- && !APR_BRIGADE_EMPTY(bb)) {
- status = write_brigade_raw(output, f, bb);
- }
-
- /* If the passed brigade is not empty, save it before return */
- if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, output->task->c,
- "h2_task(%s): could not write all, saving brigade",
- output->task->id);
- if (!output->bb) {
- output->bb = apr_brigade_create(output->task->pool, output->task->c->bucket_alloc);
- }
- return ap_save_brigade(f, &output->bb, &bb, output->task->pool);
- }
-
- return status;
-}
-
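
h2_task_output_write() above follows a common partial-write pattern: flush any previously saved data first, only then write the new brigade, and save whatever the multiplexer did not accept. Below is a minimal sketch of the same ordering in plain C, with a fixed-size sink standing in for h2_mplx_out_write() and its APR_INCOMPLETE result; all names are invented for the illustration.

    #include <stdio.h>
    #include <string.h>

    /* A sink that may accept fewer bytes than offered, standing in for
     * h2_mplx_out_write() returning APR_INCOMPLETE on a partial write. */
    typedef struct {
        char data[16];
        size_t len;
    } sink;

    static size_t sink_write(sink *s, const char *buf, size_t len)
    {
        size_t room = sizeof(s->data) - s->len;
        size_t n = len < room ? len : room;
        memcpy(s->data + s->len, buf, n);
        s->len += n;
        return n;                       /* may be < len */
    }

    /* Leftover bytes not yet accepted, standing in for output->bb. */
    typedef struct {
        char saved[256];
        size_t saved_len;
    } out_state;

    /* Same ordering as h2_task_output_write(): flush what was saved first,
     * only then write the new data, and save whatever did not fit. */
    static void out_write(out_state *o, sink *s, const char *buf, size_t len)
    {
        size_t n;

        if (o->saved_len > 0) {
            n = sink_write(s, o->saved, o->saved_len);
            memmove(o->saved, o->saved + n, o->saved_len - n);
            o->saved_len -= n;
        }
        if (o->saved_len == 0 && len > 0) {
            n = sink_write(s, buf, len);
            buf += n;
            len -= n;
        }
        if (len > 0 && o->saved_len + len <= sizeof(o->saved)) {
            memcpy(o->saved + o->saved_len, buf, len);
            o->saved_len += len;
        }
    }

    int main(void)
    {
        sink s = { {0}, 0 };
        out_state o = { {0}, 0 };

        out_write(&o, &s, "hello, ", 7);
        out_write(&o, &s, "bucket brigades!", 16);   /* only part fits */
        printf("sent=%.*s saved=%.*s\n",
               (int)s.len, s.data, (int)o.saved_len, o.saved);
        return 0;
    }
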
diff --git a/modules/http2/h2_task_output.h b/modules/http2/h2_task_output.h
deleted file mode 100644
index 3135bc45..00000000
--- a/modules/http2/h2_task_output.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_task_output__
-#define __mod_h2__h2_task_output__
-
-/* h2_task_output reads a HTTP/1 response from the brigade and applies
- * them to a h2_output_converter. The brigade is setup as the output brigade
- * for our pseudo httpd conn_rec that is handling a specific h2_task.
- *
- */
-struct apr_thread_cond_t;
-struct h2_mplx;
-struct h2_task;
-struct h2_from_h1;
-
-typedef struct h2_task_output h2_task_output;
-
-struct h2_task_output {
- struct h2_task *task;
- struct h2_from_h1 *from_h1;
-
- unsigned int response_open : 1;
-
- apr_off_t written;
- apr_bucket_brigade *bb;
-};
-
-h2_task_output *h2_task_output_create(struct h2_task *task, conn_rec *c);
-
-apr_status_t h2_task_output_write(h2_task_output *output,
- ap_filter_t* filter,
- apr_bucket_brigade* brigade);
-
-apr_status_t h2_task_output_freeze(h2_task_output *output);
-apr_status_t h2_task_output_thaw(h2_task_output *output);
-
-#endif /* defined(__mod_h2__h2_task_output__) */
diff --git a/modules/http2/h2_task_queue.c b/modules/http2/h2_task_queue.c
deleted file mode 100644
index 2871cabc..00000000
--- a/modules/http2/h2_task_queue.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <httpd.h>
-#include <http_core.h>
-
-#include "h2_task_queue.h"
-
-
-static void tq_grow(h2_task_queue *q, int nlen);
-static void tq_swap(h2_task_queue *q, int i, int j);
-static int tq_bubble_up(h2_task_queue *q, int i, int top,
- h2_tq_cmp *cmp, void *ctx);
-static int tq_bubble_down(h2_task_queue *q, int i, int bottom,
- h2_tq_cmp *cmp, void *ctx);
-
-h2_task_queue *h2_tq_create(apr_pool_t *pool, int capacity)
-{
- h2_task_queue *q = apr_pcalloc(pool, sizeof(h2_task_queue));
- if (q) {
- q->pool = pool;
- tq_grow(q, capacity);
- q->nelts = 0;
- }
- return q;
-}
-
-int h2_tq_empty(h2_task_queue *q)
-{
- return q->nelts == 0;
-}
-
-void h2_tq_add(h2_task_queue *q, int sid, h2_tq_cmp *cmp, void *ctx)
-{
- int i;
-
- if (q->nelts >= q->nalloc) {
- tq_grow(q, q->nalloc * 2);
- }
-
- i = (q->head + q->nelts) % q->nalloc;
- q->elts[i] = sid;
- ++q->nelts;
-
- /* bubble it to the front of the queue */
- tq_bubble_up(q, i, q->head, cmp, ctx);
-}
-
-int h2_tq_remove(h2_task_queue *q, int sid)
-{
- int i;
- for (i = 0; i < q->nelts; ++i) {
- if (sid == q->elts[(q->head + i) % q->nalloc]) {
- break;
- }
- }
-
- if (i < q->nelts) {
- ++i;
- for (; i < q->nelts; ++i) {
- q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
- }
- --q->nelts;
- return 1;
- }
- return 0;
-}
-
-void h2_tq_sort(h2_task_queue *q, h2_tq_cmp *cmp, void *ctx)
-{
- /* Assume that changes in ordering are minimal. This needs,
- * best case, q->nelts - 1 comparisions to check that nothing
- * changed.
- */
- if (q->nelts > 0) {
- int i, ni, prev, last;
-
- /* Start at the end of the queue and create a tail of sorted
- * entries. Make that tail one element longer in each iteration.
- */
- last = i = (q->head + q->nelts - 1) % q->nalloc;
- while (i != q->head) {
- prev = (q->nalloc + i - 1) % q->nalloc;
-
- ni = tq_bubble_up(q, i, prev, cmp, ctx);
- if (ni == prev) {
- /* i bubbled one up, bubble the new i down, which
- * keeps all tasks below i sorted. */
- tq_bubble_down(q, i, last, cmp, ctx);
- }
- i = prev;
- };
- }
-}
-
-
-int h2_tq_shift(h2_task_queue *q)
-{
- int sid;
-
- if (q->nelts <= 0) {
- return 0;
- }
-
- sid = q->elts[q->head];
- q->head = (q->head + 1) % q->nalloc;
- q->nelts--;
-
- return sid;
-}
-
-static void tq_grow(h2_task_queue *q, int nlen)
-{
- AP_DEBUG_ASSERT(q->nalloc <= nlen);
- if (nlen > q->nalloc) {
- int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
- if (q->nelts > 0) {
- int l = ((q->head + q->nelts) % q->nalloc) - q->head;
-
- memmove(nq, q->elts + q->head, sizeof(int) * l);
- if (l < q->nelts) {
- /* elts wrapped, append elts in [0, remain] to nq */
- int remain = q->nelts - l;
- memmove(nq + l, q->elts, sizeof(int) * remain);
- }
- }
- q->elts = nq;
- q->nalloc = nlen;
- q->head = 0;
- }
-}
-
-static void tq_swap(h2_task_queue *q, int i, int j)
-{
- int x = q->elts[i];
- q->elts[i] = q->elts[j];
- q->elts[j] = x;
-}
-
-static int tq_bubble_up(h2_task_queue *q, int i, int top,
- h2_tq_cmp *cmp, void *ctx)
-{
- int prev;
- while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
- && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
- tq_swap(q, prev, i);
- i = prev;
- }
- return i;
-}
-
-static int tq_bubble_down(h2_task_queue *q, int i, int bottom,
- h2_tq_cmp *cmp, void *ctx)
-{
- int next;
- while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
- && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
- tq_swap(q, next, i);
- i = next;
- }
- return i;
-}
diff --git a/modules/http2/h2_task_queue.h b/modules/http2/h2_task_queue.h
deleted file mode 100644
index 3ff1d396..00000000
--- a/modules/http2/h2_task_queue.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_task_queue__
-#define __mod_h2__h2_task_queue__
-
-/**
- * h2_task_queue keeps a list of sorted h2_task* in ascending order.
- */
-typedef struct h2_task_queue h2_task_queue;
-
-struct h2_task_queue {
- int *elts;
- int head;
- int nelts;
- int nalloc;
- apr_pool_t *pool;
-};
-
-/**
- * Comparator for two task to determine their order.
- *
- * @param s1 stream id to compare
- * @param s2 stream id to compare
- * @param ctx provided user data
- * @return value is the same as for strcmp() and has the effect:
- * == 0: s1 and s2 are treated equal in ordering
- * < 0: s1 should be sorted before s2
- * > 0: s2 should be sorted before s1
- */
-typedef int h2_tq_cmp(int s1, int s2, void *ctx);
-
-
-/**
- * Allocate a new queue from the pool and initialize.
- * @param id the identifier of the queue
- * @param pool the memory pool
- */
-h2_task_queue *h2_tq_create(apr_pool_t *pool, int capacity);
-
-/**
- * Return != 0 iff there are no tasks in the queue.
- * @param q the queue to check
- */
-int h2_tq_empty(h2_task_queue *q);
-
-/**
- * Add a stream idto the queue.
- *
- * @param q the queue to append the task to
- * @param sid the stream id to add
- * @param cmp the comparator for sorting
- * @param ctx user data for comparator
- */
-void h2_tq_add(h2_task_queue *q, int sid, h2_tq_cmp *cmp, void *ctx);
-
-/**
- * Remove the stream id from the queue. Return != 0 iff task
- * was found in queue.
- * @param q the task queue
- * @param sid the stream id to remove
- * @return != 0 iff task was found in queue
- */
-int h2_tq_remove(h2_task_queue *q, int sid);
-
-/**
- * Sort the stream idqueue again. Call if the task ordering
- * has changed.
- *
- * @param q the queue to sort
- * @param cmp the comparator for sorting
- * @param ctx user data for the comparator
- */
-void h2_tq_sort(h2_task_queue *q, h2_tq_cmp *cmp, void *ctx);
-
-/**
- * Get the first stream id from the queue or NULL if the queue is empty.
- * The task will be removed.
- *
- * @param q the queue to get the first task from
- * @return the first stream id of the queue, 0 if empty
- */
-int h2_tq_shift(h2_task_queue *q);
-
-#endif /* defined(__mod_h2__h2_task_queue__) */
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
index 06472425..8d1060e5 100644
--- a/modules/http2/h2_util.c
+++ b/modules/http2/h2_util.c
@@ -23,8 +23,7 @@
#include <nghttp2/nghttp2.h>
-#include "h2_private.h"
-#include "h2_request.h"
+#include "h2.h"
#include "h2_util.h"
/* h2_log2(n) iff n is a power of 2 */
@@ -286,7 +285,7 @@ size_t h2_ihash_count(h2_ihash_t *ih)
return apr_hash_count(ih->hash);
}
-int h2_ihash_is_empty(h2_ihash_t *ih)
+int h2_ihash_empty(h2_ihash_t *ih)
{
return apr_hash_count(ih->hash) == 0;
}
@@ -326,11 +325,254 @@ void h2_ihash_remove(h2_ihash_t *ih, int id)
apr_hash_set(ih->hash, &id, sizeof(id), NULL);
}
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val)
+{
+ int id = *((int*)((char *)val + ih->ioff));
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+
void h2_ihash_clear(h2_ihash_t *ih)
{
apr_hash_clear(ih->hash);
}
+typedef struct {
+ h2_ihash_t *ih;
+ void **buffer;
+ size_t max;
+ size_t len;
+} collect_ctx;
+
+static int collect_iter(void *x, void *val)
+{
+ collect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = val;
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max)
+{
+ collect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, collect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove_val(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ int *buffer;
+ size_t max;
+ size_t len;
+} icollect_ctx;
+
+static int icollect_iter(void *x, void *val)
+{
+ icollect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = *((int*)((char *)val + ctx->ih->ioff));
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max)
+{
+ icollect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, icollect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+/*******************************************************************************
+ * iqueue - sorted list of int
+ ******************************************************************************/
+
+static void iq_grow(h2_iqueue *q, int nlen);
+static void iq_swap(h2_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx);
+
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity)
+{
+ h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue));
+ if (q) {
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
+ }
+ return q;
+}
+
+int h2_iq_empty(h2_iqueue *q)
+{
+ return q->nelts == 0;
+}
+
+int h2_iq_count(h2_iqueue *q)
+{
+ return q->nelts;
+}
+
+
+void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
+{
+ int i;
+
+ if (q->nelts >= q->nalloc) {
+ iq_grow(q, q->nalloc * 2);
+ }
+
+ i = (q->head + q->nelts) % q->nalloc;
+ q->elts[i] = sid;
+ ++q->nelts;
+
+ if (cmp) {
+ /* bubble it to the front of the queue */
+ iq_bubble_up(q, i, q->head, cmp, ctx);
+ }
+}
+
+int h2_iq_remove(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ break;
+ }
+ }
+
+ if (i < q->nelts) {
+ ++i;
+ for (; i < q->nelts; ++i) {
+ q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
+ }
+ --q->nelts;
+ return 1;
+ }
+ return 0;
+}
+
+void h2_iq_clear(h2_iqueue *q)
+{
+ q->nelts = 0;
+}
+
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx)
+{
+ /* Assume that changes in ordering are minimal. This needs,
+ * best case, q->nelts - 1 comparisons to check that nothing
+ * changed.
+ */
+ if (q->nelts > 0) {
+ int i, ni, prev, last;
+
+ /* Start at the end of the queue and create a tail of sorted
+ * entries. Make that tail one element longer in each iteration.
+ */
+ last = i = (q->head + q->nelts - 1) % q->nalloc;
+ while (i != q->head) {
+ prev = (q->nalloc + i - 1) % q->nalloc;
+
+ ni = iq_bubble_up(q, i, prev, cmp, ctx);
+ if (ni == prev) {
+ /* i bubbled one up, bubble the new i down, which
+ * keeps all tasks below i sorted. */
+ iq_bubble_down(q, i, last, cmp, ctx);
+ }
+ i = prev;
+ };
+ }
+}
+
+
+int h2_iq_shift(h2_iqueue *q)
+{
+ int sid;
+
+ if (q->nelts <= 0) {
+ return 0;
+ }
+
+ sid = q->elts[q->head];
+ q->head = (q->head + 1) % q->nalloc;
+ q->nelts--;
+
+ return sid;
+}
+
+static void iq_grow(h2_iqueue *q, int nlen)
+{
+ if (nlen > q->nalloc) {
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
+ if (q->nelts > 0) {
+ int l = ((q->head + q->nelts) % q->nalloc) - q->head;
+
+ memmove(nq, q->elts + q->head, sizeof(int) * l);
+ if (l < q->nelts) {
+ /* elts wrapped, append elts in [0, remain] to nq */
+ int remain = q->nelts - l;
+ memmove(nq + l, q->elts, sizeof(int) * remain);
+ }
+ }
+ q->elts = nq;
+ q->nalloc = nlen;
+ q->head = 0;
+ }
+}
+
+static void iq_swap(h2_iqueue *q, int i, int j)
+{
+ int x = q->elts[i];
+ q->elts[i] = q->elts[j];
+ q->elts[j] = x;
+}
+
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int prev;
+ while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
+ && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
+ iq_swap(q, prev, i);
+ i = prev;
+ }
+ return i;
+}
+
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int next;
+ while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
+ && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
+ iq_swap(q, next, i);
+ i = next;
+ }
+ return i;
+}
+
/*******************************************************************************
* h2_util for apt_table_t
******************************************************************************/
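
The h2_iqueue added above (the replacement for h2_task_queue) keeps stream ids in a ring buffer and restores ordering by bubbling a newly appended entry towards the head. Here is a self-contained sketch of that insertion and shift logic in plain C, with a fixed capacity instead of iq_grow() and a trivial ascending comparator; intq, q_add and q_shift are names made up for the example.

    #include <stdio.h>

    #define CAP 16   /* fixed capacity; the real queue grows via iq_grow() */

    typedef struct {
        int elts[CAP];
        int head;
        int nelts;
    } intq;

    /* Smallest stream id first; plays the role of an h2_iq_cmp callback. */
    static int cmp_ids(int i1, int i2)
    {
        return i1 - i2;
    }

    static void q_swap(intq *q, int i, int j)
    {
        int x = q->elts[i];
        q->elts[i] = q->elts[j];
        q->elts[j] = x;
    }

    /* Append sid at the tail of the ring, then bubble it towards the head
     * while it compares smaller than its predecessor (cf. iq_bubble_up). */
    static void q_add(intq *q, int sid)
    {
        int i, prev;

        if (q->nelts >= CAP) {
            return;                    /* full; the real code grows the array */
        }
        i = (q->head + q->nelts) % CAP;
        q->elts[i] = sid;
        ++q->nelts;
        while (((prev = (CAP + i - 1) % CAP), i != q->head)
               && cmp_ids(q->elts[i], q->elts[prev]) < 0) {
            q_swap(q, prev, i);
            i = prev;
        }
    }

    /* Pop the smallest element from the head of the ring (cf. h2_iq_shift). */
    static int q_shift(intq *q)
    {
        int sid;

        if (q->nelts <= 0) {
            return 0;
        }
        sid = q->elts[q->head];
        q->head = (q->head + 1) % CAP;
        q->nelts--;
        return sid;
    }

    int main(void)
    {
        intq q = { {0}, 0, 0 };

        q_add(&q, 5);
        q_add(&q, 1);
        q_add(&q, 3);
        while (q.nelts > 0) {
            printf("%d ", q_shift(&q));   /* prints: 1 3 5 */
        }
        printf("\n");
        return 0;
    }
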
@@ -368,15 +610,6 @@ apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra)
* h2_util for bucket brigades
******************************************************************************/
-/* DEEP_COPY==0 crashes under load. I think the setaside is fine,
- * however buckets moved to another thread will still be
- * free'd against the old bucket_alloc. *And* if the old
- * pool gets destroyed too early, the bucket disappears while
- * still needed.
- */
-static const int DEEP_COPY = 1;
-static const int FILE_MOVE = 1;
-
static apr_status_t last_not_included(apr_bucket_brigade *bb,
apr_off_t maxlen,
int same_alloc,
@@ -397,11 +630,6 @@ static apr_status_t last_not_included(apr_bucket_brigade *bb,
/* included */
}
else {
- if (maxlen == 0) {
- *pend = b;
- return status;
- }
-
if (b->length == ((apr_size_t)-1)) {
const char *ign;
apr_size_t ilen;
@@ -411,6 +639,11 @@ static apr_status_t last_not_included(apr_bucket_brigade *bb,
}
}
+ if (maxlen == 0 && b->length > 0) {
+ *pend = b;
+ return status;
+ }
+
if (same_alloc && APR_BUCKET_IS_FILE(b)) {
/* we like it move it, always */
}
@@ -434,200 +667,95 @@ static apr_status_t last_not_included(apr_bucket_brigade *bb,
return status;
}
-#define LOG_BUCKETS 0
-#define LOG_LEVEL APLOG_INFO
-
-apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_off_t maxlen, apr_size_t *pfile_buckets_allowed,
- const char *msg)
+apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length)
{
+ apr_bucket *b, *next;
+ apr_off_t remain = length;
apr_status_t status = APR_SUCCESS;
- int same_alloc;
- AP_DEBUG_ASSERT(to);
- AP_DEBUG_ASSERT(from);
- same_alloc = (to->bucket_alloc == from->bucket_alloc
- || to->p == from->p);
-
- if (!FILE_MOVE) {
- pfile_buckets_allowed = NULL;
- }
-
- if (!APR_BRIGADE_EMPTY(from)) {
- apr_bucket *b, *end;
+ for (b = APR_BRIGADE_FIRST(src);
+ b != APR_BRIGADE_SENTINEL(src);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
- status = last_not_included(from, maxlen, same_alloc,
- pfile_buckets_allowed, &end);
- if (status != APR_SUCCESS) {
- return status;
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* fall through */
}
-
- while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) {
- b = APR_BRIGADE_FIRST(from);
- if (b == end) {
- break;
+ else {
+ if (remain == b->length) {
+ /* fall through */
}
-
- if (same_alloc || (b->list == to->bucket_alloc)) {
- /* both brigades use the same bucket_alloc and auto-cleanups
- * have the same life time. It's therefore safe to just move
- * directly. */
- APR_BUCKET_REMOVE(b);
- APR_BRIGADE_INSERT_TAIL(to, b);
-#if LOG_BUCKETS
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03205)
- "h2_util_move: %s, passed bucket(same bucket_alloc) "
- "%ld-%ld, type=%s",
- msg, (long)b->start, (long)b->length,
- APR_BUCKET_IS_METADATA(b)?
- (APR_BUCKET_IS_EOS(b)? "EOS":
- (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) :
- (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
-#endif
+ else if (remain <= 0) {
+ return status;
}
- else if (DEEP_COPY) {
- /* we have not managed the magic of passing buckets from
- * one thread to another. Any attempts result in
- * cleanup of pools scrambling memory.
- */
- if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_EOS(b)) {
- APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
- }
- else {
- /* ignore */
- }
- }
- else if (pfile_buckets_allowed
- && *pfile_buckets_allowed > 0
- && APR_BUCKET_IS_FILE(b)) {
- /* We do not want to read files when passing buckets, if
- * we can avoid it. However, what we've come up so far
- * is not working corrently, resulting either in crashes or
- * too many open file descriptors.
- */
- apr_bucket_file *f = (apr_bucket_file *)b->data;
- apr_file_t *fd = f->fd;
- int setaside = (f->readpool != to->p);
-#if LOG_BUCKETS
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03206)
- "h2_util_move: %s, moving FILE bucket %ld-%ld "
- "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d",
- msg, (long)b->start, (long)b->length,
- (long)from, (long)from->p,
- (long)to, (long)to->p, setaside);
-#endif
- if (setaside) {
- status = apr_file_setaside(&fd, fd, to->p);
- if (status != APR_SUCCESS) {
- ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p,
- APLOGNO(02947) "h2_util: %s, setaside FILE",
- msg);
- return status;
- }
+ else {
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
}
- apr_brigade_insert_file(to, fd, b->start, b->length,
- to->p);
- --(*pfile_buckets_allowed);
}
- else {
- const char *data;
- apr_size_t len;
-
- status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
- if (status == APR_SUCCESS && len > 0) {
- status = apr_brigade_write(to, NULL, NULL, data, len);
-#if LOG_BUCKETS
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03207)
- "h2_util_move: %s, copied bucket %ld-%ld "
- "from=%lx(p=%lx) to=%lx(p=%lx)",
- msg, (long)b->start, (long)b->length,
- (long)from, (long)from->p,
- (long)to, (long)to->p);
-#endif
- }
+
+ if (remain < b->length) {
+ apr_bucket_split(b, remain);
}
- apr_bucket_delete(b);
- }
- else {
- apr_bucket_setaside(b, to->p);
- APR_BUCKET_REMOVE(b);
- APR_BRIGADE_INSERT_TAIL(to, b);
-#if LOG_BUCKETS
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03208)
- "h2_util_move: %s, passed setaside bucket %ld-%ld "
- "from=%lx(p=%lx) to=%lx(p=%lx)",
- msg, (long)b->start, (long)b->length,
- (long)from, (long)from->p,
- (long)to, (long)to->p);
-#endif
}
}
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ remain -= b->length;
}
-
return status;
}
-apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_off_t maxlen, const char *msg)
+apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length)
{
+ apr_bucket *b, *next;
+ apr_off_t remain = length;
apr_status_t status = APR_SUCCESS;
- int same_alloc;
-
- (void)msg;
- AP_DEBUG_ASSERT(to);
- AP_DEBUG_ASSERT(from);
- same_alloc = (to->bucket_alloc == from->bucket_alloc);
-
- if (!APR_BRIGADE_EMPTY(from)) {
- apr_bucket *b, *end, *cpy;
+
+ for (b = APR_BRIGADE_FIRST(src);
+ b != APR_BRIGADE_SENTINEL(src);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
- status = last_not_included(from, maxlen, 0, 0, &end);
- if (status != APR_SUCCESS) {
- return status;
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* fall through */
}
-
- for (b = APR_BRIGADE_FIRST(from);
- b != APR_BRIGADE_SENTINEL(from) && b != end;
- b = APR_BUCKET_NEXT(b))
- {
- if (same_alloc) {
- status = apr_bucket_copy(b, &cpy);
- if (status != APR_SUCCESS) {
- break;
- }
- APR_BRIGADE_INSERT_TAIL(to, cpy);
+ else {
+ if (remain == b->length) {
+ /* fall through */
+ }
+ else if (remain <= 0) {
+ return status;
}
else {
- if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_EOS(b)) {
- APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
- }
- else if (APR_BUCKET_IS_FLUSH(b)) {
- APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
- }
- else {
- /* ignore */
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
}
}
- else {
- const char *data;
- apr_size_t len;
- status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
- if (status == APR_SUCCESS && len > 0) {
- status = apr_brigade_write(to, NULL, NULL, data, len);
-#if LOG_BUCKETS
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03209)
- "h2_util_copy: %s, copied bucket %ld-%ld "
- "from=%lx(p=%lx) to=%lx(p=%lx)",
- msg, (long)b->start, (long)b->length,
- (long)from, (long)from->p,
- (long)to, (long)to->p);
-#endif
- }
+
+ if (remain < b->length) {
+ apr_bucket_split(b, remain);
}
}
}
+ status = apr_bucket_copy(b, &b);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ remain -= b->length;
}
return status;
}
@@ -652,39 +780,6 @@ int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len)
return 0;
}
-int h2_util_bb_has_data(apr_bucket_brigade *bb)
-{
- apr_bucket *b;
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b))
- {
- if (!AP_BUCKET_IS_EOR(b)) {
- return 1;
- }
- }
- return 0;
-}
-
-int h2_util_bb_has_data_or_eos(apr_bucket_brigade *bb)
-{
- apr_bucket *b;
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b))
- {
- if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_EOS(b)) {
- return 1;
- }
- }
- else {
- return 1;
- }
- }
- return 0;
-}
-
apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
apr_off_t *plen, int *peos)
{
@@ -789,186 +884,89 @@ apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
return status;
}
-void h2_util_bb_log(conn_rec *c, int stream_id, int level,
- const char *tag, apr_bucket_brigade *bb)
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+ apr_bucket *b, const char *sep)
{
- char buffer[16 * 1024];
- const char *line = "(null)";
- apr_size_t bmax = sizeof(buffer)/sizeof(buffer[0]);
- int off = 0;
- apr_bucket *b;
+ apr_size_t off = 0;
+ if (sep && *sep) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s", sep);
+ }
- if (bb) {
- memset(buffer, 0, bmax--);
- for (b = APR_BRIGADE_FIRST(bb);
- bmax && (b != APR_BRIGADE_SENTINEL(bb));
- b = APR_BUCKET_NEXT(b)) {
-
- if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_EOS(b)) {
- off += apr_snprintf(buffer+off, bmax-off, "eos ");
- }
- else if (APR_BUCKET_IS_FLUSH(b)) {
- off += apr_snprintf(buffer+off, bmax-off, "flush ");
- }
- else if (AP_BUCKET_IS_EOR(b)) {
- off += apr_snprintf(buffer+off, bmax-off, "eor ");
- }
- else {
- off += apr_snprintf(buffer+off, bmax-off, "meta(unknown) ");
- }
- }
- else {
- const char *btype = "data";
- if (APR_BUCKET_IS_FILE(b)) {
- btype = "file";
- }
- else if (APR_BUCKET_IS_PIPE(b)) {
- btype = "pipe";
- }
- else if (APR_BUCKET_IS_SOCKET(b)) {
- btype = "socket";
- }
- else if (APR_BUCKET_IS_HEAP(b)) {
- btype = "heap";
- }
- else if (APR_BUCKET_IS_TRANSIENT(b)) {
- btype = "transient";
- }
- else if (APR_BUCKET_IS_IMMORTAL(b)) {
- btype = "immortal";
- }
-#if APR_HAS_MMAP
- else if (APR_BUCKET_IS_MMAP(b)) {
- btype = "mmap";
- }
-#endif
- else if (APR_BUCKET_IS_POOL(b)) {
- btype = "pool";
- }
-
- off += apr_snprintf(buffer+off, bmax-off, "%s[%ld] ",
- btype,
- (long)(b->length == ((apr_size_t)-1)?
- -1 : b->length));
- }
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "eos");
+ }
+ else if (APR_BUCKET_IS_FLUSH(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "flush");
+ }
+ else if (AP_BUCKET_IS_EOR(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "eor");
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "meta(unknown)");
}
- line = *buffer? buffer : "(empty)";
}
- /* Intentional no APLOGNO */
- ap_log_cerror(APLOG_MARK, level, 0, c, "bb_dump(%ld-%d)-%s: %s",
- c->id, stream_id, tag, line);
-
-}
-
-apr_status_t h2_ltransfer_brigade(apr_bucket_brigade *to,
- apr_bucket_brigade *from,
- apr_pool_t *p,
- apr_off_t *plen,
- int *peos)
-{
- apr_bucket *e;
- apr_off_t len = 0, remain = *plen;
- apr_status_t rv;
-
- *peos = 0;
-
- while (!APR_BRIGADE_EMPTY(from)) {
- e = APR_BRIGADE_FIRST(from);
-
- if (APR_BUCKET_IS_METADATA(e)) {
- if (APR_BUCKET_IS_EOS(e)) {
- *peos = 1;
- }
+ else {
+ const char *btype = "data";
+ if (APR_BUCKET_IS_FILE(b)) {
+ btype = "file";
}
- else {
- if (remain > 0 && e->length == ((apr_size_t)-1)) {
- const char *ign;
- apr_size_t ilen;
- rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ);
- if (rv != APR_SUCCESS) {
- return rv;
- }
- }
-
- if (remain < e->length) {
- if (remain <= 0) {
- return APR_SUCCESS;
- }
- apr_bucket_split(e, remain);
- }
+ else if (APR_BUCKET_IS_PIPE(b)) {
+ btype = "pipe";
}
-
- rv = apr_bucket_setaside(e, p);
-
- /* If the bucket type does not implement setaside, then
- * (hopefully) morph it into a bucket type which does, and set
- * *that* aside... */
- if (rv == APR_ENOTIMPL) {
- const char *s;
- apr_size_t n;
-
- rv = apr_bucket_read(e, &s, &n, APR_BLOCK_READ);
- if (rv == APR_SUCCESS) {
- rv = apr_bucket_setaside(e, p);
- }
+ else if (APR_BUCKET_IS_SOCKET(b)) {
+ btype = "socket";
}
-
- if (rv != APR_SUCCESS) {
- /* Return an error but still save the brigade if
- * ->setaside() is really not implemented. */
- if (rv != APR_ENOTIMPL) {
- return rv;
- }
+ else if (APR_BUCKET_IS_HEAP(b)) {
+ btype = "heap";
+ }
+ else if (APR_BUCKET_IS_TRANSIENT(b)) {
+ btype = "transient";
+ }
+ else if (APR_BUCKET_IS_IMMORTAL(b)) {
+ btype = "immortal";
+ }
+#if APR_HAS_MMAP
+ else if (APR_BUCKET_IS_MMAP(b)) {
+ btype = "mmap";
+ }
+#endif
+ else if (APR_BUCKET_IS_POOL(b)) {
+ btype = "pool";
}
- APR_BUCKET_REMOVE(e);
- APR_BRIGADE_INSERT_TAIL(to, e);
- len += e->length;
- remain -= e->length;
+ off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]",
+ btype,
+ (long)(b->length == ((apr_size_t)-1)?
+ -1 : b->length));
}
-
- *plen = len;
- return APR_SUCCESS;
+ return off;
}
-apr_status_t h2_transfer_brigade(apr_bucket_brigade *to,
- apr_bucket_brigade *from,
- apr_pool_t *p)
+apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ apr_bucket_brigade *bb)
{
- apr_bucket *e;
- apr_status_t rv;
-
- while (!APR_BRIGADE_EMPTY(from)) {
- e = APR_BRIGADE_FIRST(from);
-
- rv = apr_bucket_setaside(e, p);
-
- /* If the bucket type does not implement setaside, then
- * (hopefully) morph it into a bucket type which does, and set
- * *that* aside... */
- if (rv == APR_ENOTIMPL) {
- const char *s;
- apr_size_t n;
+ apr_size_t off = 0;
+ const char *sp = "";
+ apr_bucket *b;
+
+ if (bb) {
+ memset(buffer, 0, bmax--);
+ off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
+ for (b = APR_BRIGADE_FIRST(bb);
+ bmax && (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
- rv = apr_bucket_read(e, &s, &n, APR_BLOCK_READ);
- if (rv == APR_SUCCESS) {
- rv = apr_bucket_setaside(e, p);
- }
+ off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
+ sp = " ";
}
-
- if (rv != APR_SUCCESS) {
- /* Return an error but still save the brigade if
- * ->setaside() is really not implemented. */
- if (rv != APR_ENOTIMPL) {
- return rv;
- }
- }
-
- APR_BUCKET_REMOVE(e);
- APR_BRIGADE_INSERT_TAIL(to, e);
+ off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
}
- return APR_SUCCESS;
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
+ }
+ return off;
}
apr_status_t h2_append_brigade(apr_bucket_brigade *to,
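
h2_util_bucket_print() and h2_util_bb_print() above build a bounded summary string by repeatedly advancing an offset with apr_snprintf(buffer+off, bmax-off, ...). The following plain-C sketch shows the same accumulation pattern with snprintf; the item type and function names are invented for the example.

    #include <stdio.h>

    /* Describe a list of (type, length) pairs into buffer, separated by
     * spaces, in the spirit of h2_util_bb_print(). Hypothetical item type. */
    typedef struct {
        const char *type;
        long length;
    } item;

    static size_t items_print(char *buffer, size_t bmax,
                              const char *tag, const item *v, size_t n)
    {
        size_t off = 0, i;
        const char *sep = "";

        off += snprintf(buffer + off, bmax - off, "%s(", tag);
        for (i = 0; i < n && off < bmax; ++i) {
            off += snprintf(buffer + off, bmax - off, "%s%s[%ld]",
                            sep, v[i].type, v[i].length);
            sep = " ";
        }
        if (off < bmax) {
            off += snprintf(buffer + off, bmax - off, ")");
        }
        return off;
    }

    int main(void)
    {
        item v[] = { { "heap", 1024 }, { "file", 4096 }, { "eos", 0 } };
        char buffer[128];

        items_print(buffer, sizeof(buffer), "bb", v, 3);
        puts(buffer);   /* bb(heap[1024] file[4096] eos[0]) */
        return 0;
    }
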
@@ -988,6 +986,8 @@ apr_status_t h2_append_brigade(apr_bucket_brigade *to,
if (APR_BUCKET_IS_METADATA(e)) {
if (APR_BUCKET_IS_EOS(e)) {
*peos = 1;
+ apr_bucket_delete(e);
+ continue;
}
}
else {
@@ -1235,6 +1235,107 @@ int h2_proxy_res_ignore_header(const char *name, size_t len)
|| ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len));
}
+apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ char *hname, *hvalue;
+
+ if (h2_req_ignore_header(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
+ const char *existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ char *nval;
+
+ /* Cookie headers come separately in HTTP/2, but need
+ * to be merged by "; " (instead of default ", ")
+ */
+ hvalue = apr_pstrndup(pool, value, vlen);
+ nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
+ apr_table_setn(headers, "Cookie", nval);
+ return APR_SUCCESS;
+ }
+ }
+ else if (H2_HD_MATCH_LIT("host", name, nlen)) {
+ if (apr_table_get(headers, "Host")) {
+ return APR_SUCCESS; /* ignore duplicate */
+ }
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+ hvalue = apr_pstrndup(pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
+
+/*******************************************************************************
+ * h2 request handling
+ ******************************************************************************/
+
+h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header, int serialize)
+{
+ h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
+
+ req->id = id;
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+ req->serialize = serialize;
+
+ return req;
+}
+
+h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize)
+{
+ return h2_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
+}
+
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
+{
+ h1_ctx *x = ctx;
+ size_t klen = strlen(key);
+ if (!h2_req_ignore_header(key, klen)) {
+ h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value));
+ }
+ return 1;
+}
+
+apr_status_t h2_req_make(h2_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers)
+{
+ h1_ctx x;
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+
+ AP_DEBUG_ASSERT(req->scheme);
+ AP_DEBUG_ASSERT(req->authority);
+ AP_DEBUG_ASSERT(req->path);
+ AP_DEBUG_ASSERT(req->method);
+
+ x.pool = pool;
+ x.headers = req->headers;
+ apr_table_do(set_h1_header, &x, headers, NULL);
+ return APR_SUCCESS;
+}
/*******************************************************************************
* frame logging
@@ -1298,7 +1399,7 @@ int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
}
case NGHTTP2_GOAWAY: {
size_t len = (frame->goaway.opaque_data_len < s_len)?
- frame->goaway.opaque_data_len : s_len-1;
+ frame->goaway.opaque_data_len : s_len-1;
memcpy(scratch, frame->goaway.opaque_data, len);
scratch[len] = '\0';
return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
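
The h2_headers_add_h1() helper added to this file merges the separate HTTP/2 cookie header fields into a single HTTP/1.1 Cookie value joined with "; " rather than the usual ", ". Below is a rough standalone illustration of that merge, using malloc and memcpy in place of the APR pool and table calls; the function name is ad hoc.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Join several HTTP/2 "cookie" header fields into one HTTP/1.1 Cookie
     * value with "; " separators, as h2_headers_add_h1() does via
     * apr_psprintf(). */
    static char *merge_cookies(const char *const *fields, size_t n)
    {
        size_t i, l, len = 0;
        char *out, *p;

        for (i = 0; i < n; ++i) {
            len += strlen(fields[i]) + 2;          /* room for "; " */
        }
        out = malloc(len + 1);
        if (!out) {
            return NULL;
        }
        p = out;
        for (i = 0; i < n; ++i) {
            if (i > 0) {
                memcpy(p, "; ", 2);
                p += 2;
            }
            l = strlen(fields[i]);
            memcpy(p, fields[i], l);
            p += l;
        }
        *p = '\0';
        return out;
    }

    int main(void)
    {
        const char *fields[] = { "a=1", "b=2", "c=3" };
        char *cookie = merge_cookies(fields, 3);

        if (cookie) {
            printf("Cookie: %s\n", cookie);        /* Cookie: a=1; b=2; c=3 */
            free(cookie);
        }
        return 0;
    }
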
diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h
index 4ca2f9b6..7cae0ac0 100644
--- a/modules/http2/h2_util.h
+++ b/modules/http2/h2_util.h
@@ -16,6 +16,8 @@
#ifndef __mod_h2__h2_util__
#define __mod_h2__h2_util__
+#include <nghttp2/nghttp2.h>
+
/*******************************************************************************
* some debugging/format helpers
******************************************************************************/
@@ -47,7 +49,7 @@ typedef int h2_ihash_iter_t(void *ctx, void *val);
h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
size_t h2_ihash_count(h2_ihash_t *ih);
-int h2_ihash_is_empty(h2_ihash_t *ih);
+int h2_ihash_empty(h2_ihash_t *ih);
void *h2_ihash_get(h2_ihash_t *ih, int id);
/**
@@ -56,14 +58,104 @@ void *h2_ihash_get(h2_ihash_t *ih, int id);
* @param ih the hash to iterate over
* @param fn the function to invoke on each member
* @param ctx user supplied data passed into each iteration call
- * @param 0 if one iteration returned 0, otherwise != 0
+ * @return 0 if one iteration returned 0, otherwise != 0
*/
int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx);
void h2_ihash_add(h2_ihash_t *ih, void *val);
void h2_ihash_remove(h2_ihash_t *ih, int id);
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val);
void h2_ihash_clear(h2_ihash_t *ih);
-
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max);
+size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max);
+
+/*******************************************************************************
+ * iqueue - sorted list of int with user defined ordering
+ ******************************************************************************/
+typedef struct h2_iqueue {
+ int *elts;
+ int head;
+ int nelts;
+ int nalloc;
+ apr_pool_t *pool;
+} h2_iqueue;
+
+/**
+ * Comparator for two int to determine their order.
+ *
+ * @param i1 first int to compare
+ * @param i2 second int to compare
+ * @param ctx provided user data
+ * @return value is the same as for strcmp() and has the effect:
+ * == 0: i1 and i2 are treated equal in ordering
+ * < 0: i1 should be sorted before i2
+ * > 0: i2 should be sorted before i1
+ */
+typedef int h2_iq_cmp(int i1, int i2, void *ctx);
+
+/**
+ * Allocate a new queue from the pool and initialize.
+ * @param id the identifier of the queue
+ * @param pool the memory pool
+ */
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity);
+
+/**
+ * Return != 0 iff there are no tasks in the queue.
+ * @param q the queue to check
+ */
+int h2_iq_empty(h2_iqueue *q);
+
+/**
+ * Return the number of ints in the queue.
+ * @param q the queue to get size on
+ */
+int h2_iq_count(h2_iqueue *q);
+
+/**
+ * Add a stream id to the queue.
+ *
+ * @param q the queue to append the task to
+ * @param sid the stream id to add
+ * @param cmp the comparator for sorting
+ * @param ctx user data for comparator
+ */
+void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Remove the stream id from the queue. Return != 0 iff task
+ * was found in queue.
+ * @param q the task queue
+ * @param sid the stream id to remove
+ * @return != 0 iff task was found in queue
+ */
+int h2_iq_remove(h2_iqueue *q, int sid);
+
+/**
+ * Remove all entries in the queue.
+ */
+void h2_iq_clear(h2_iqueue *q);
+
+/**
+ * Sort the stream id queue again. Call if the task ordering
+ * has changed.
+ *
+ * @param q the queue to sort
+ * @param cmp the comparator for sorting
+ * @param ctx user data for the comparator
+ */
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Get the first stream id from the queue, or 0 if the queue is empty.
+ * The task will be removed.
+ *
+ * @param q the queue to get the first task from
+ * @return the first stream id of the queue, 0 if empty
+ */
+int h2_iq_shift(h2_iqueue *q);
+
/*******************************************************************************
* common helpers
******************************************************************************/
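
The comparator contract above follows strcmp() semantics: a negative result sorts the first id earlier. A short usage sketch of the declared h2_iq_* API with an ascending-by-id comparator follows; it assumes an initialized APR pool and, since h2_util.h is a module-internal header, it is meant to illustrate the contract rather than to build outside the module tree.

    #include <stdio.h>
    #include <apr_general.h>
    #include <apr_pools.h>
    #include "h2_util.h"   /* module-internal header declaring h2_iqueue */

    /* Order stream ids ascending; ctx is unused here. Matches the h2_iq_cmp
     * contract above: < 0 sorts i1 before i2. */
    static int by_id(int i1, int i2, void *ctx)
    {
        (void)ctx;
        return i1 - i2;
    }

    int main(void)
    {
        apr_pool_t *pool;
        h2_iqueue *q;

        apr_initialize();
        apr_pool_create(&pool, NULL);

        q = h2_iq_create(pool, 4);
        h2_iq_add(q, 7, by_id, NULL);
        h2_iq_add(q, 3, by_id, NULL);
        h2_iq_add(q, 5, by_id, NULL);

        while (!h2_iq_empty(q)) {
            printf("%d ", h2_iq_shift(q));   /* prints: 3 5 7 */
        }
        printf("\n");

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }
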
@@ -161,44 +253,51 @@ h2_ngheader *h2_util_ngheader_make_res(apr_pool_t *p,
h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
const struct h2_request *req);
+apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+/*******************************************************************************
+ * h2_request helpers
+ ******************************************************************************/
+
+struct h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header,
+ int serialize);
+struct h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize);
+
+apr_status_t h2_req_make(struct h2_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers);
+
/*******************************************************************************
* apr brigade helpers
******************************************************************************/
+
/**
- * Moves data from one brigade into another. If maxlen > 0, it only
- * moves up to maxlen bytes into the target brigade, making bucket splits
- * if needed.
- * @param to the brigade to move the data to
- * @param from the brigade to get the data from
- * @param maxlen of bytes to move, <= 0 for all
- * @param pfile_buckets_allowed how many file buckets may be moved,
- * may be 0 or NULL
- * @param msg message for use in logging
+ * Concatenate at most length bytes from src to dest brigade, splitting
+ * buckets if necessary and reading buckets of indeterminate length.
*/
-apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_off_t maxlen, apr_size_t *pfile_buckets_allowed,
- const char *msg);
-
+apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length);
+
/**
- * Copies buckets from one brigade into another. If maxlen > 0, it only
- * copies up to maxlen bytes into the target brigade, making bucket splits
- * if needed.
- * @param to the brigade to copy the data to
- * @param from the brigade to get the data from
- * @param maxlen of bytes to copy, <= 0 for all
- * @param msg message for use in logging
+ * Copy at most length bytes from src to dest brigade, splitting
+ * buckets if necessary and reading buckets of indeterminate length.
*/
-apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_off_t maxlen, const char *msg);
-
+apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length);
+
/**
* Return != 0 iff there is a FLUSH or EOS bucket in the brigade.
* @param bb the brigade to check on
* @return != 0 iff brigade holds FLUSH or EOS bucket (or both)
*/
int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len);
-int h2_util_bb_has_data(apr_bucket_brigade *bb);
-int h2_util_bb_has_data_or_eos(apr_bucket_brigade *bb);
/**
* Check how many bytes of the desired amount are available and if the
@@ -230,43 +329,39 @@ apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
apr_off_t *plen, int *peos);
/**
+ * Print a bucket's meta data (type and length) to the buffer.
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+ apr_bucket *b, const char *sep);
+
+/**
+ * Prints the brigade bucket types and lengths into the given buffer
+ * up to bmax.
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ apr_bucket_brigade *bb);
+/**
* Logs the bucket brigade (which bucket types with what length)
* to the log at the given level.
* @param c the connection to log for
- * @param stream_id the stream identifier this brigade belongs to
+ * @param sid the stream identifier this brigade belongs to
* @param level the log level (as in APLOG_*)
* @param tag a short message text about the context
* @param bb the brigade to log
*/
-void h2_util_bb_log(conn_rec *c, int stream_id, int level,
- const char *tag, apr_bucket_brigade *bb);
+#define h2_util_bb_log(c, sid, level, tag, bb) \
+do { \
+ char buffer[4 * 1024]; \
+ const char *line = "(null)"; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \
+ ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld-%d): %s", \
+ (c)->id, (int)(sid), (len? buffer : line)); \
+} while(0)
-/**
- * Transfer buckets from one brigade to another with a limit on the
- * maximum amount of bytes transfered. Sets aside the buckets to
- * pool p.
- * @param to brigade to transfer buckets to
- * @param from brigades to remove buckets from
- * @param p pool that buckets should be setaside to
- * @param plen maximum bytes to transfer, actual bytes transferred
- * @param peos if an EOS bucket was transferred
- */
-apr_status_t h2_ltransfer_brigade(apr_bucket_brigade *to,
- apr_bucket_brigade *from,
- apr_pool_t *p,
- apr_off_t *plen,
- int *peos);
-
-/**
- * Transfer all buckets from one brigade to another. Sets aside the buckets to
- * pool p.
- * @param to brigade to transfer buckets to
- * @param from brigades to remove buckets from
- * @param p pool that buckets should be setaside to
- */
-apr_status_t h2_transfer_brigade(apr_bucket_brigade *to,
- apr_bucket_brigade *from,
- apr_pool_t *p);
/**
* Transfer buckets from one brigade to another with a limit on the
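
h2_util_bb_log() is now a do { ... } while(0) macro, so the 4 KB print buffer lives on the caller's stack and the whole expansion still behaves as a single statement in if/else chains. A tiny sketch of that macro shape, using a hypothetical LOG_LINE macro:

    #include <stdio.h>

    /* A multi-statement macro wrapped in do { ... } while(0), as the new
     * h2_util_bb_log above is, so it expands to exactly one statement. */
    #define LOG_LINE(tag, value)                                    \
        do {                                                        \
            char buf[64];                                           \
            snprintf(buf, sizeof(buf), "%s: %d", (tag), (value));   \
            puts(buf);                                              \
        } while (0)

    int main(void)
    {
        int n = 3;

        if (n > 0)
            LOG_LINE("count", n);   /* safe in if/else: no dangling-else issue */
        else
            LOG_LINE("empty", 0);
        return 0;
    }
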
diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h
index d68130db..abf69c1d 100644
--- a/modules/http2/h2_version.h
+++ b/modules/http2/h2_version.h
@@ -26,7 +26,7 @@
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "1.4.6"
+#define MOD_HTTP2_VERSION "1.5.11"
/**
* @macro
@@ -34,7 +34,7 @@
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x010406
+#define MOD_HTTP2_VERSION_NUM 0x01050b
#endif /* mod_h2_h2_version_h */
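
As the comment above notes, MOD_HTTP2_VERSION_NUM packs major, minor and patch into 8 bits each, so 1.5.11 becomes 0x01050b. A small check of that encoding (MAKE_VERSION_NUM is a name invented here):

    #include <stdio.h>

    /* Pack major.minor.patch into 24 bits: 1.5.11 -> 0x01050b. */
    #define MAKE_VERSION_NUM(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))

    int main(void)
    {
        unsigned v = MAKE_VERSION_NUM(1, 5, 11);

        printf("0x%06x -> %u.%u.%u\n", v,
               (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
        return 0;
    }
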
diff --git a/modules/http2/h2_worker.c b/modules/http2/h2_worker.c
index ca6ce3a2..44feac14 100644
--- a/modules/http2/h2_worker.c
+++ b/modules/http2/h2_worker.c
@@ -42,10 +42,8 @@ static void* APR_THREAD_FUNC execute(apr_thread_t *thread, void *wctx)
/* Get a h2_task from the main workers queue. */
worker->get_next(worker, worker->ctx, &task, &sticky);
while (task) {
- h2_task_do(task, worker->io);
-
- /* if someone was waiting on this task, time to wake up */
- apr_thread_cond_signal(worker->io);
+
+ h2_task_do(task, thread);
/* report the task done and maybe get another one from the same
* mplx (= master connection), if we can be sticky.
*/
@@ -64,40 +62,20 @@ static void* APR_THREAD_FUNC execute(apr_thread_t *thread, void *wctx)
}
h2_worker *h2_worker_create(int id,
- apr_pool_t *parent_pool,
+ apr_pool_t *pool,
apr_threadattr_t *attr,
h2_worker_mplx_next_fn *get_next,
h2_worker_done_fn *worker_done,
void *ctx)
{
- apr_allocator_t *allocator = NULL;
- apr_pool_t *pool = NULL;
- h2_worker *w;
- apr_status_t status;
-
- apr_allocator_create(&allocator);
- apr_allocator_max_free_set(allocator, ap_max_mem_free);
- apr_pool_create_ex(&pool, parent_pool, NULL, allocator);
- apr_pool_tag(pool, "h2_worker");
- apr_allocator_owner_set(allocator, pool);
-
- w = apr_pcalloc(pool, sizeof(h2_worker));
+ h2_worker *w = apr_pcalloc(pool, sizeof(h2_worker));
if (w) {
- APR_RING_ELEM_INIT(w, link);
-
w->id = id;
- w->pool = pool;
-
+ APR_RING_ELEM_INIT(w, link);
w->get_next = get_next;
w->worker_done = worker_done;
w->ctx = ctx;
-
- status = apr_thread_cond_create(&w->io, w->pool);
- if (status != APR_SUCCESS) {
- return NULL;
- }
-
- apr_thread_create(&w->thread, attr, execute, w, w->pool);
+ apr_thread_create(&w->thread, attr, execute, w, pool);
}
return w;
}
@@ -109,22 +87,9 @@ apr_status_t h2_worker_destroy(h2_worker *worker)
apr_thread_join(&status, worker->thread);
worker->thread = NULL;
}
- if (worker->io) {
- apr_thread_cond_destroy(worker->io);
- worker->io = NULL;
- }
- if (worker->pool) {
- apr_pool_destroy(worker->pool);
- /* worker is gone */
- }
return APR_SUCCESS;
}
-int h2_worker_get_id(h2_worker *worker)
-{
- return worker->id;
-}
-
void h2_worker_abort(h2_worker *worker)
{
worker->aborted = 1;
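
The reworked execute() loop above no longer owns a per-worker pool or condition variable; it simply keeps asking get_next for tasks and hands each one to h2_task_do(task, thread). A pthread-based sketch of that loop shape with hypothetical task/worker types and a canned task list; the real worker additionally reports each task back to its h2_mplx and may stay "sticky" on the same connection.

    #include <pthread.h>
    #include <stdio.h>

    typedef struct task {
        int stream_id;
    } task;

    typedef struct worker {
        pthread_t thread;
        task *(*get_next)(struct worker *w, void *ctx);
        void *ctx;
    } worker;

    /* Worker thread: pull tasks until get_next returns NULL. */
    static void *execute(void *wctx)
    {
        worker *w = wctx;
        task *t;

        while ((t = w->get_next(w, w->ctx)) != NULL) {
            printf("worker: handling stream %d\n", t->stream_id);
            /* h2_task_do(task, thread) runs the request in the real module */
        }
        return NULL;
    }

    static task tasks[] = { { 1 }, { 3 }, { 5 } };
    static int next_idx;

    static task *get_next(worker *w, void *ctx)
    {
        (void)w;
        (void)ctx;
        return (next_idx < 3) ? &tasks[next_idx++] : NULL;
    }

    int main(void)
    {
        worker w;

        w.get_next = get_next;
        w.ctx = NULL;
        pthread_create(&w.thread, NULL, execute, &w);
        pthread_join(w.thread, NULL);
        return 0;
    }
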
diff --git a/modules/http2/h2_worker.h b/modules/http2/h2_worker.h
index 7a8c254f..04ff5703 100644
--- a/modules/http2/h2_worker.h
+++ b/modules/http2/h2_worker.h
@@ -16,7 +16,6 @@
#ifndef __mod_h2__h2_worker__
#define __mod_h2__h2_worker__
-struct apr_thread_cond_t;
struct h2_mplx;
struct h2_request;
struct h2_task;
@@ -39,19 +38,14 @@ typedef void h2_worker_done_fn(h2_worker *worker, void *ctx);
struct h2_worker {
+ int id;
/** Links to the rest of the workers */
APR_RING_ENTRY(h2_worker) link;
-
- int id;
apr_thread_t *thread;
- apr_pool_t *pool;
- struct apr_thread_cond_t *io;
-
h2_worker_mplx_next_fn *get_next;
h2_worker_done_fn *worker_done;
void *ctx;
-
- unsigned int aborted : 1;
+ int aborted;
};
/**
@@ -136,8 +130,6 @@ apr_status_t h2_worker_destroy(h2_worker *worker);
void h2_worker_abort(h2_worker *worker);
-int h2_worker_get_id(h2_worker *worker);
-
int h2_worker_is_aborted(h2_worker *worker);
#endif /* defined(__mod_h2__h2_worker__) */
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
index 2c1dc8da..2a159991 100644
--- a/modules/http2/h2_workers.c
+++ b/modules/http2/h2_workers.c
@@ -116,7 +116,7 @@ static apr_status_t get_mplx_next(h2_worker *worker, void *ctx,
if (status == APR_SUCCESS) {
++workers->idle_workers;
ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_worker(%d): looking for work", h2_worker_get_id(worker));
+ "h2_worker(%d): looking for work", worker->id);
while (!h2_worker_is_aborted(worker) && !workers->aborted
&& !(task = next_task(workers))) {
@@ -195,7 +195,7 @@ static void worker_done(h2_worker *worker, void *ctx)
apr_status_t status = apr_thread_mutex_lock(workers->lock);
if (status == APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_worker(%d): done", h2_worker_get_id(worker));
+ "h2_worker(%d): done", worker->id);
H2_WORKER_REMOVE(worker);
--workers->worker_count;
H2_WORKER_LIST_INSERT_TAIL(&workers->zombies, worker);
@@ -213,7 +213,7 @@ static apr_status_t add_worker(h2_workers *workers)
return APR_ENOMEM;
}
ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_workers: adding worker(%d)", h2_worker_get_id(w));
+ "h2_workers: adding worker(%d)", w->id);
++workers->worker_count;
H2_WORKER_LIST_INSERT_TAIL(&workers->workers, w);
return APR_SUCCESS;
diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
index 0d339691..480917a4 100644
--- a/modules/http2/mod_http2.c
+++ b/modules/http2/mod_http2.c
@@ -57,6 +57,13 @@ AP_DECLARE_MODULE(http2) = {
static int h2_h2_fixups(request_rec *r);
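+/* Build-time features detected in h2_post_config below and reported
+ * in the startup log line. */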
+typedef struct {
+ unsigned int change_prio : 1;
+ unsigned int sha256 : 1;
+} features;
+
+static features myfeats;
+
/* The module initialization. Called once as apache hook, before any multi
* processing (threaded or not) happens. It is typically at least called twice,
* see
@@ -77,7 +84,16 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog,
const char *mod_h2_init_key = "mod_http2_init_counter";
nghttp2_info *ngh2;
apr_status_t status;
+ const char *sep = "";
+
(void)plog;(void)ptemp;
+#ifdef H2_NG2_CHANGE_PRIO
+ myfeats.change_prio = 1;
+ sep = "+";
+#endif
+#ifdef H2_OPENSSL
+ myfeats.sha256 = 1;
+#endif
apr_pool_userdata_get(&data, mod_h2_init_key, s->process->pool);
if ( data == NULL ) {
@@ -90,8 +106,11 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog,
ngh2 = nghttp2_version(0);
ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03090)
- "mod_http2 (v%s, nghttp2 %s), initializing...",
- MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown");
+ "mod_http2 (v%s, feats=%s%s%s, nghttp2 %s), initializing...",
+ MOD_HTTP2_VERSION,
+ myfeats.change_prio? "CHPRIO" : "", sep,
+ myfeats.sha256? "SHA256" : "",
+ ngh2? ngh2->version_str : "unknown");
switch (h2_conn_mpm_type()) {
case H2_MPM_SIMPLE:
diff --git a/modules/http2/mod_http2.dep b/modules/http2/mod_http2.dep
index 8b8ebe6e..6e1a2b1e 100644
--- a/modules/http2/mod_http2.dep
+++ b/modules/http2/mod_http2.dep
@@ -103,7 +103,6 @@
".\h2.h"\
".\h2_bucket_eoc.h"\
".\h2_conn_io.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_session.h"\
@@ -154,7 +153,6 @@
"..\..\srclib\apr\include\apr_want.h"\
".\h2.h"\
".\h2_bucket_eos.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_stream.h"\
@@ -279,7 +277,6 @@
".\h2_ctx.h"\
".\h2_filter.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_session.h"\
@@ -453,7 +450,6 @@
".\h2_conn_io.h"\
".\h2_ctx.h"\
".\h2_filter.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_push.h"\
@@ -523,7 +519,6 @@
".\h2_private.h"\
".\h2_response.h"\
".\h2_task.h"\
- ".\h2_task_output.h"\
".\h2_util.h"\
@@ -584,7 +579,6 @@
".\h2_conn_io.h"\
".\h2_ctx.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_private.h"\
".\h2_request.h"\
".\h2_session.h"\
@@ -602,7 +596,6 @@
"..\..\srclib\apr\include\apr_pools.h"\
"..\..\srclib\apr\include\apr_thread_mutex.h"\
"..\..\srclib\apr\include\apr_want.h"\
- ".\h2_int_queue.h"\
./h2_io.c : \
@@ -652,7 +645,6 @@
"..\..\srclib\apr\include\apr_want.h"\
".\h2.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_request.h"\
@@ -704,7 +696,6 @@
"..\..\srclib\apr\include\apr_time.h"\
"..\..\srclib\apr\include\apr_user.h"\
"..\..\srclib\apr\include\apr_want.h"\
- ".\h2_io.h"\
".\h2_io_set.h"\
".\h2_private.h"\
@@ -758,9 +749,6 @@
".\h2_conn.h"\
".\h2_ctx.h"\
".\h2_h2.h"\
- ".\h2_int_queue.h"\
- ".\h2_io.h"\
- ".\h2_io_set.h"\
".\h2_mplx.h"\
".\h2_ngn_shed.h"\
".\h2_private.h"\
@@ -768,8 +756,6 @@
".\h2_response.h"\
".\h2_stream.h"\
".\h2_task.h"\
- ".\h2_task_input.h"\
- ".\h2_task_output.h"\
".\h2_util.h"\
".\h2_worker.h"\
".\h2_workers.h"\
@@ -825,15 +811,12 @@
".\h2_conn.h"\
".\h2_ctx.h"\
".\h2_h2.h"\
- ".\h2_int_queue.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_ngn_shed.h"\
".\h2_private.h"\
".\h2_request.h"\
".\h2_response.h"\
".\h2_task.h"\
- ".\h2_task_output.h"\
".\h2_util.h"\
".\mod_http2.h"\
@@ -884,7 +867,6 @@
".\h2.h"\
".\h2_conn_io.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_private.h"\
".\h2_push.h"\
".\h2_request.h"\
@@ -1068,7 +1050,6 @@
".\h2_filter.h"\
".\h2_from_h1.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_push.h"\
@@ -1132,7 +1113,6 @@
".\h2_ctx.h"\
".\h2_filter.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_push.h"\
@@ -1141,7 +1121,6 @@
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
- ".\h2_task_input.h"\
".\h2_util.h"\
@@ -1266,15 +1245,12 @@
".\h2_ctx.h"\
".\h2_from_h1.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_request.h"\
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
- ".\h2_task_input.h"\
- ".\h2_task_output.h"\
".\h2_worker.h"\
@@ -1324,14 +1300,12 @@
".\h2.h"\
".\h2_conn.h"\
".\h2_conn_io.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_request.h"\
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
- ".\h2_task_input.h"\
".\h2_util.h"\
@@ -1348,6 +1322,7 @@
"..\..\include\http_connection.h"\
"..\..\include\http_core.h"\
"..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
"..\..\include\httpd.h"\
"..\..\include\os.h"\
"..\..\include\util_cfgtree.h"\
@@ -1383,7 +1358,6 @@
".\h2_conn.h"\
".\h2_conn_io.h"\
".\h2_from_h1.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_request.h"\
@@ -1391,7 +1365,6 @@
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
- ".\h2_task_output.h"\
".\h2_util.h"\
@@ -1499,7 +1472,6 @@
".\h2_conn.h"\
".\h2_ctx.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_task.h"\
@@ -1559,7 +1531,6 @@
"..\..\srclib\apr\include\apr_user.h"\
"..\..\srclib\apr\include\apr_want.h"\
".\h2.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_task.h"\
@@ -1614,6 +1585,7 @@
"..\..\srclib\apr\include\apr_proc_mutex.h"\
"..\..\srclib\apr\include\apr_ring.h"\
"..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
"..\..\srclib\apr\include\apr_tables.h"\
"..\..\srclib\apr\include\apr_thread_mutex.h"\
"..\..\srclib\apr\include\apr_thread_proc.h"\
@@ -1628,7 +1600,6 @@
".\h2_ctx.h"\
".\h2_filter.h"\
".\h2_h2.h"\
- ".\h2_io.h"\
".\h2_mplx.h"\
".\h2_push.h"\
".\h2_request.h"\
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
index eb55028a..94941487 100644
--- a/modules/http2/mod_http2.dsp
+++ b/modules/http2/mod_http2.dsp
@@ -105,6 +105,10 @@ SOURCE=./h2_alt_svc.c
# End Source File
# Begin Source File
+SOURCE=./h2_bucket_beam.c
+# End Source File
+# Begin Source File
+
SOURCE=./h2_bucket_eoc.c
# End Source File
# Begin Source File
@@ -141,18 +145,6 @@ SOURCE=./h2_h2.c
# End Source File
# Begin Source File
-SOURCE=./h2_int_queue.c
-# End Source File
-# Begin Source File
-
-SOURCE=./h2_io.c
-# End Source File
-# Begin Source File
-
-SOURCE=./h2_io_set.c
-# End Source File
-# Begin Source File
-
SOURCE=./h2_mplx.c
# End Source File
# Begin Source File
@@ -189,14 +181,6 @@ SOURCE=./h2_task.c
# End Source File
# Begin Source File
-SOURCE=./h2_task_input.c
-# End Source File
-# Begin Source File
-
-SOURCE=./h2_task_output.c
-# End Source File
-# Begin Source File
-
SOURCE=./h2_util.c
# End Source File
# Begin Source File
diff --git a/modules/http2/mod_http2.mak b/modules/http2/mod_http2.mak
index 75dcc1b0..7f8b30cc 100644
--- a/modules/http2/mod_http2.mak
+++ b/modules/http2/mod_http2.mak
@@ -50,6 +50,7 @@ CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd
CLEAN :
!ENDIF
-@erase "$(INTDIR)\h2_alt_svc.obj"
+ -@erase "$(INTDIR)\h2_bucket_beam.obj"
-@erase "$(INTDIR)\h2_bucket_eoc.obj"
-@erase "$(INTDIR)\h2_bucket_eos.obj"
-@erase "$(INTDIR)\h2_config.obj"
@@ -59,9 +60,6 @@ CLEAN :
-@erase "$(INTDIR)\h2_filter.obj"
-@erase "$(INTDIR)\h2_from_h1.obj"
-@erase "$(INTDIR)\h2_h2.obj"
- -@erase "$(INTDIR)\h2_int_queue.obj"
- -@erase "$(INTDIR)\h2_io.obj"
- -@erase "$(INTDIR)\h2_io_set.obj"
-@erase "$(INTDIR)\h2_mplx.obj"
-@erase "$(INTDIR)\h2_ngn_shed.obj"
-@erase "$(INTDIR)\h2_push.obj"
@@ -71,8 +69,6 @@ CLEAN :
-@erase "$(INTDIR)\h2_stream.obj"
-@erase "$(INTDIR)\h2_switch.obj"
-@erase "$(INTDIR)\h2_task.obj"
- -@erase "$(INTDIR)\h2_task_input.obj"
- -@erase "$(INTDIR)\h2_task_output.obj"
-@erase "$(INTDIR)\h2_util.obj"
-@erase "$(INTDIR)\h2_worker.obj"
-@erase "$(INTDIR)\h2_workers.obj"
@@ -133,6 +129,7 @@ LINK32=link.exe
LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref
LINK32_OBJS= \
"$(INTDIR)\h2_alt_svc.obj" \
+ "$(INTDIR)\h2_bucket_beam.obj" \
"$(INTDIR)\h2_bucket_eoc.obj" \
"$(INTDIR)\h2_bucket_eos.obj" \
"$(INTDIR)\h2_config.obj" \
@@ -142,9 +139,6 @@ LINK32_OBJS= \
"$(INTDIR)\h2_filter.obj" \
"$(INTDIR)\h2_from_h1.obj" \
"$(INTDIR)\h2_h2.obj" \
- "$(INTDIR)\h2_int_queue.obj" \
- "$(INTDIR)\h2_io.obj" \
- "$(INTDIR)\h2_io_set.obj" \
"$(INTDIR)\h2_mplx.obj" \
"$(INTDIR)\h2_ngn_shed.obj" \
"$(INTDIR)\h2_push.obj" \
@@ -154,8 +148,6 @@ LINK32_OBJS= \
"$(INTDIR)\h2_stream.obj" \
"$(INTDIR)\h2_switch.obj" \
"$(INTDIR)\h2_task.obj" \
- "$(INTDIR)\h2_task_input.obj" \
- "$(INTDIR)\h2_task_output.obj" \
"$(INTDIR)\h2_util.obj" \
"$(INTDIR)\h2_worker.obj" \
"$(INTDIR)\h2_workers.obj" \
@@ -208,6 +200,7 @@ CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - W
CLEAN :
!ENDIF
-@erase "$(INTDIR)\h2_alt_svc.obj"
+ -@erase "$(INTDIR)\h2_bucket_beam.obj"
-@erase "$(INTDIR)\h2_bucket_eoc.obj"
-@erase "$(INTDIR)\h2_bucket_eos.obj"
-@erase "$(INTDIR)\h2_config.obj"
@@ -217,9 +210,6 @@ CLEAN :
-@erase "$(INTDIR)\h2_filter.obj"
-@erase "$(INTDIR)\h2_from_h1.obj"
-@erase "$(INTDIR)\h2_h2.obj"
- -@erase "$(INTDIR)\h2_int_queue.obj"
- -@erase "$(INTDIR)\h2_io.obj"
- -@erase "$(INTDIR)\h2_io_set.obj"
-@erase "$(INTDIR)\h2_mplx.obj"
-@erase "$(INTDIR)\h2_ngn_shed.obj"
-@erase "$(INTDIR)\h2_push.obj"
@@ -229,8 +219,6 @@ CLEAN :
-@erase "$(INTDIR)\h2_stream.obj"
-@erase "$(INTDIR)\h2_switch.obj"
-@erase "$(INTDIR)\h2_task.obj"
- -@erase "$(INTDIR)\h2_task_input.obj"
- -@erase "$(INTDIR)\h2_task_output.obj"
-@erase "$(INTDIR)\h2_util.obj"
-@erase "$(INTDIR)\h2_worker.obj"
-@erase "$(INTDIR)\h2_workers.obj"
@@ -291,6 +279,7 @@ LINK32=link.exe
LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
LINK32_OBJS= \
"$(INTDIR)\h2_alt_svc.obj" \
+ "$(INTDIR)\h2_bucket_beam.obj" \
"$(INTDIR)\h2_bucket_eoc.obj" \
"$(INTDIR)\h2_bucket_eos.obj" \
"$(INTDIR)\h2_config.obj" \
@@ -300,9 +289,6 @@ LINK32_OBJS= \
"$(INTDIR)\h2_filter.obj" \
"$(INTDIR)\h2_from_h1.obj" \
"$(INTDIR)\h2_h2.obj" \
- "$(INTDIR)\h2_int_queue.obj" \
- "$(INTDIR)\h2_io.obj" \
- "$(INTDIR)\h2_io_set.obj" \
"$(INTDIR)\h2_mplx.obj" \
"$(INTDIR)\h2_ngn_shed.obj" \
"$(INTDIR)\h2_push.obj" \
@@ -312,8 +298,6 @@ LINK32_OBJS= \
"$(INTDIR)\h2_stream.obj" \
"$(INTDIR)\h2_switch.obj" \
"$(INTDIR)\h2_task.obj" \
- "$(INTDIR)\h2_task_input.obj" \
- "$(INTDIR)\h2_task_output.obj" \
"$(INTDIR)\h2_util.obj" \
"$(INTDIR)\h2_worker.obj" \
"$(INTDIR)\h2_workers.obj" \
@@ -438,6 +422,11 @@ SOURCE=./h2_alt_svc.c
"$(INTDIR)\h2_alt_svc.obj" : $(SOURCE) "$(INTDIR)"
+SOURCE=./h2_bucket_beam.c
+
+"$(INTDIR)/h2_bucket_beam.obj" : $(SOURCE) "$(INTDIR)"
+
+
SOURCE=./h2_bucket_eoc.c
"$(INTDIR)\h2_bucket_eoc.obj" : $(SOURCE) "$(INTDIR)"
@@ -483,21 +472,6 @@ SOURCE=./h2_h2.c
"$(INTDIR)\h2_h2.obj" : $(SOURCE) "$(INTDIR)"
-SOURCE=./h2_int_queue.c
-
-"$(INTDIR)\h2_int_queue.obj" : $(SOURCE) "$(INTDIR)"
-
-
-SOURCE=./h2_io.c
-
-"$(INTDIR)\h2_io.obj" : $(SOURCE) "$(INTDIR)"
-
-
-SOURCE=./h2_io_set.c
-
-"$(INTDIR)\h2_io_set.obj" : $(SOURCE) "$(INTDIR)"
-
-
SOURCE=./h2_mplx.c
"$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)"
@@ -543,16 +517,6 @@ SOURCE=./h2_task.c
"$(INTDIR)\h2_task.obj" : $(SOURCE) "$(INTDIR)"
-SOURCE=./h2_task_input.c
-
-"$(INTDIR)\h2_task_input.obj" : $(SOURCE) "$(INTDIR)"
-
-
-SOURCE=./h2_task_output.c
-
-"$(INTDIR)\h2_task_output.obj" : $(SOURCE) "$(INTDIR)"
-
-
SOURCE=./h2_util.c
"$(INTDIR)\h2_util.obj" : $(SOURCE) "$(INTDIR)"
@@ -574,14 +538,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
new file mode 100644
index 00000000..df1d7811
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.c
@@ -0,0 +1,650 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <nghttp2/nghttp2.h>
+
+#include <httpd.h>
+#include <mod_proxy.h>
+#include "mod_http2.h"
+
+
+#include "mod_proxy_http2.h"
+#include "h2_request.h"
+#include "h2_proxy_util.h"
+#include "h2_version.h"
+#include "h2_proxy_session.h"
+
+static void register_hook(apr_pool_t *p);
+
+AP_DECLARE_MODULE(proxy_http2) = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ register_hook /* register hooks */
+};
+
+/* Optional functions from mod_http2 */
+static int (*is_h2)(conn_rec *c);
+static apr_status_t (*req_engine_push)(const char *name, request_rec *r,
+ http2_req_engine_init *einit);
+static apr_status_t (*req_engine_pull)(h2_req_engine *engine,
+ apr_read_type_e block,
+ apr_uint32_t capacity,
+ request_rec **pr);
+static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn);
+
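+/* Per-connection proxy context: tracks the backend proxy connection,
+ * the request engine (if this connection hosts one) and the next
+ * request to submit. Stored in the connection's module config. */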
+typedef struct h2_proxy_ctx {
+ conn_rec *owner;
+ apr_pool_t *pool;
+ request_rec *rbase;
+ server_rec *server;
+ const char *proxy_func;
+ char server_portstr[32];
+ proxy_conn_rec *p_conn;
+ proxy_worker *worker;
+ proxy_server_conf *conf;
+
+ h2_req_engine *engine;
+ const char *engine_id;
+ const char *engine_type;
+ apr_pool_t *engine_pool;
+ apr_uint32_t req_buffer_size;
+ request_rec *next;
+ apr_size_t capacity;
+
+ unsigned standalone : 1;
+ unsigned is_ssl : 1;
+ unsigned flushall : 1;
+
+ apr_status_t r_status; /* status of our first request work */
+ h2_proxy_session *session; /* current http2 session against backend */
+} h2_proxy_ctx;
+
+static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data = NULL;
+ const char *init_key = "mod_proxy_http2_init_counter";
+ nghttp2_info *ngh2;
+ apr_status_t status = APR_SUCCESS;
+ (void)plog;(void)ptemp;
+
+ apr_pool_userdata_get(&data, init_key, s->process->pool);
+ if ( data == NULL ) {
+ apr_pool_userdata_set((const void *)1, init_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return APR_SUCCESS;
+ }
+
+ ngh2 = nghttp2_version(0);
+ ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03349)
+ "mod_proxy_http2 (v%s, nghttp2 %s), initializing...",
+ MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown");
+
+ is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
+ req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push);
+ req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull);
+ req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done);
+
+ /* we need all three engine functions; if any is missing, use none */
+ if (!req_engine_push || !req_engine_pull || !req_engine_done) {
+ req_engine_push = NULL;
+ req_engine_pull = NULL;
+ req_engine_done = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * canonicalize the url into the request, if it is meant for us.
+ * slightly modified copy of the canon handler in mod_proxy_http
+ */
+static int proxy_http2_canon(request_rec *r, char *url)
+{
+ char *host, *path, sport[7];
+ char *search = NULL;
+ const char *err;
+ const char *scheme;
+ const char *http_scheme;
+ apr_port_t port, def_port;
+
+ /* ap_port_of_scheme() */
+ if (ap_cstr_casecmpn(url, "h2c:", 4) == 0) {
+ url += 4;
+ scheme = "h2c";
+ http_scheme = "http";
+ }
+ else if (ap_cstr_casecmpn(url, "h2:", 3) == 0) {
+ url += 3;
+ scheme = "h2";
+ http_scheme = "https";
+ }
+ else {
+ return DECLINED;
+ }
+ port = def_port = ap_proxy_port_of_scheme(http_scheme);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "HTTP2: canonicalising URL %s", url);
+
+ /* do a syntactic check.
+ * We break the URL into host, port, path, search
+ */
+ err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03350)
+ "error parsing URL %s: %s", url, err);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /*
+ * now parse path/search args, according to rfc1738:
+ * process the path.
+ *
+ * In a reverse proxy, our URL has been processed, so canonicalise
+ * unless proxy-nocanon is set to say it's raw;
+ * in a forward proxy, we have the original and MUST NOT MANGLE it.
+ */
+ switch (r->proxyreq) {
+ default: /* wtf are we doing here? */
+ case PROXYREQ_REVERSE:
+ if (apr_table_get(r->notes, "proxy-nocanon")) {
+ path = url; /* this is the raw path */
+ }
+ else {
+ path = ap_proxy_canonenc(r->pool, url, strlen(url),
+ enc_path, 0, r->proxyreq);
+ search = r->args;
+ }
+ break;
+ case PROXYREQ_PROXY:
+ path = url;
+ break;
+ }
+
+ if (path == NULL) {
+ return HTTP_BAD_REQUEST;
+ }
+
+ if (port != def_port) {
+ apr_snprintf(sport, sizeof(sport), ":%d", port);
+ }
+ else {
+ sport[0] = '\0';
+ }
+
+ if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */
+ host = apr_pstrcat(r->pool, "[", host, "]", NULL);
+ }
+ r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport,
+ "/", path, (search) ? "?" : "", (search) ? search : "", NULL);
+ return OK;
+}
+
+static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes)
+{
+ h2_proxy_ctx *ctx = baton;
+
+ if (ctx->session) {
+ h2_proxy_session_update_window(ctx->session, c, bytes);
+ }
+}
+
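+/* Engine init callback handed to mod_http2: when this connection is
+ * picked to host a request engine, copy the context into the engine's
+ * pool so it outlives the initiating request. */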
+static apr_status_t proxy_engine_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_uint32_t req_buffer_size,
+ request_rec *r,
+ http2_output_consumed **pconsumed,
+ void **pctx)
+{
+ h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
+ &proxy_http2_module);
+ if (ctx) {
+ conn_rec *c = ctx->owner;
+ h2_proxy_ctx *nctx;
+
+ /* we need another lifetime for this. If we do not host
+ * an engine, the context lives in r->pool. Since we expect
+ * to serve more than just r, we need to live longer */
+ nctx = apr_pcalloc(pool, sizeof(*nctx));
+ if (nctx == NULL) {
+ return APR_ENOMEM;
+ }
+ memcpy(nctx, ctx, sizeof(*nctx));
+ ctx = nctx;
+ ctx->pool = pool;
+ ctx->engine = engine;
+ ctx->engine_id = id;
+ ctx->engine_type = type;
+ ctx->engine_pool = pool;
+ ctx->req_buffer_size = req_buffer_size;
+ ctx->capacity = 100;
+
+ ap_set_module_config(c->conn_config, &proxy_http2_module, ctx);
+
+ *pconsumed = out_consumed;
+ *pctx = ctx;
+ return APR_SUCCESS;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368)
+ "h2_proxy_session, engine init, no ctx found");
+ return APR_ENOTIMPL;
+}
+
+static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
+{
+ h2_proxy_ctx *ctx = session->user_data;
+ const char *url;
+ apr_status_t status;
+
+ url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE);
+ apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
+ ctx->p_conn->connection->local_addr->port));
+ status = h2_proxy_session_submit(session, url, r, ctx->standalone);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351)
+ "pass request body failed to %pI (%s) from %s (%s)",
+ ctx->p_conn->addr, ctx->p_conn->hostname ?
+ ctx->p_conn->hostname: "", session->c->client_ip,
+ session->c->remote_host ? session->c->remote_host: "");
+ }
+ return status;
+}
+
+static void request_done(h2_proxy_session *session, request_rec *r,
+ int complete, int touched)
+{
+ h2_proxy_ctx *ctx = session->user_data;
+ const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
+
+ if (!complete && !touched) {
+ /* untouched request, need rescheduling */
+ if (req_engine_push && is_h2 && is_h2(ctx->owner)) {
+ if (req_engine_push(ctx->engine_type, r, NULL) == APR_SUCCESS) {
+ /* push to engine */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
+ APLOGNO(03369)
+ "h2_proxy_session(%s): rescheduled request %s",
+ ctx->engine_id, task_id);
+ return;
+ }
+ }
+ }
+
+ if (r == ctx->rbase && complete) {
+ ctx->r_status = APR_SUCCESS;
+ }
+
+ if (complete) {
+ if (req_engine_done && ctx->engine) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
+ APLOGNO(03370)
+ "h2_proxy_session(%s): finished request %s",
+ ctx->engine_id, task_id);
+ req_engine_done(ctx->engine, r->connection);
+ }
+ }
+ else {
+ if (req_engine_done && ctx->engine) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
+ APLOGNO(03371)
+ "h2_proxy_session(%s): failed request %s",
+ ctx->engine_id, task_id);
+ req_engine_done(ctx->engine, r->connection);
+ }
+ }
+}
+
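+/* Fetch the next request to serve: either one already assigned to
+ * ctx->next, or one pulled from the hosted engine (blocking only when
+ * called before leaving the handler). Returns APR_EOF when there is
+ * no engine to pull from. */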
+static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave)
+{
+ if (ctx->next) {
+ return APR_SUCCESS;
+ }
+ else if (req_engine_pull && ctx->engine) {
+ apr_status_t status;
+ status = req_engine_pull(ctx->engine, before_leave?
+ APR_BLOCK_READ: APR_NONBLOCK_READ,
+ ctx->capacity, &ctx->next);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, ctx->owner,
+ "h2_proxy_engine(%s): pulled request (%s) %s",
+ ctx->engine_id,
+ before_leave? "before leave" : "regular",
+ (ctx->next? ctx->next->the_request : "NULL"));
+ return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status;
+ }
+ return APR_EOF;
+}
+
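+/* Set up an h2_proxy_session on the backend connection and process it
+ * until the session ends, submitting ctx->next and any requests pulled
+ * from the engine along the way. */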
+static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
+ apr_status_t status = OK;
+
+ /* Step Four: Send the Request in a new HTTP/2 stream and
+ * loop until we get the response or encounter errors.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
+ "eng(%s): setup session", ctx->engine_id);
+ ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf,
+ 30, h2_log2(ctx->req_buffer_size),
+ request_done);
+ if (!ctx->session) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner,
+ APLOGNO(03372) "session unavailable");
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
+ "eng(%s): run session %s", ctx->engine_id, ctx->session->id);
+ ctx->session->user_data = ctx;
+
+ while (1) {
+ if (ctx->next) {
+ add_request(ctx->session, ctx->next);
+ ctx->next = NULL;
+ }
+
+ status = h2_proxy_session_process(ctx->session);
+
+ if (status == APR_SUCCESS) {
+ apr_status_t s2;
+ /* ongoing processing, call again */
+ if (ctx->session->remote_max_concurrent > 0
+ && ctx->session->remote_max_concurrent != ctx->capacity) {
+ ctx->capacity = ctx->session->remote_max_concurrent;
+ }
+ s2 = next_request(ctx, 0);
+ if (s2 == APR_ECONNABORTED) {
+ /* master connection gone */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner,
+ APLOGNO(03374) "eng(%s): pull request",
+ ctx->engine_id);
+ status = s2;
+ break;
+ }
+ if (!ctx->next && h2_ihash_empty(ctx->session->streams)) {
+ break;
+ }
+ }
+ else {
+ /* end of processing, maybe error */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03375) "eng(%s): end of session %s",
+ ctx->engine_id, ctx->session->id);
+ /*
+ * Any open stream of that session needs to
+ * a) be reopened on the new session iff safe to do so
+ * b) reported as done (failed) otherwise
+ */
+ h2_proxy_session_cleanup(ctx->session, request_done);
+ break;
+ }
+ }
+
+ ctx->session->user_data = NULL;
+ ctx->session = NULL;
+
+ return status;
+}
+
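+/* Decide where ctx->next gets processed: try to push it to an existing
+ * proxy_http2 engine for the same backend; otherwise configure this
+ * context as a standalone engine and handle it here. */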
+static h2_proxy_ctx *push_request_somewhere(h2_proxy_ctx *ctx)
+{
+ conn_rec *c = ctx->owner;
+ const char *engine_type, *hostname;
+
+ hostname = (ctx->p_conn->ssl_hostname?
+ ctx->p_conn->ssl_hostname : ctx->p_conn->hostname);
+ engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
+ ctx->server_portstr);
+
+ if (c->master && req_engine_push && ctx->next && is_h2 && is_h2(c)) {
+ /* If we have req_engine capabilities, push the handling of this
+ * request (e.g. a slave connection) to a proxy_http2 engine which
+ * uses the same backend. We may be called to create such an engine
+ * ourselves. */
+ if (req_engine_push(engine_type, ctx->next, proxy_engine_init)
+ == APR_SUCCESS) {
+ /* to renew the lifetime, we might have set a new ctx */
+ ctx = ap_get_module_config(c->conn_config, &proxy_http2_module);
+ if (ctx->engine == NULL) {
+ /* Another engine instance has taken over processing of this
+ * request. */
+ ctx->r_status = SUSPENDED;
+ ctx->next = NULL;
+ return ctx;
+ }
+ }
+ }
+
+ if (!ctx->engine) {
+ /* No engine was available or has been initialized; handle this
+ * request ourselves. */
+ ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id);
+ ctx->engine_type = engine_type;
+ ctx->engine_pool = ctx->pool;
+ ctx->req_buffer_size = (32*1024);
+ ctx->standalone = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_proxy_http2(%ld): setup standalone engine for type %s",
+ c->id, engine_type);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "H2: hosting engine %s", ctx->engine_id);
+ }
+ return ctx;
+}
+
+static int proxy_http2_handler(request_rec *r,
+ proxy_worker *worker,
+ proxy_server_conf *conf,
+ char *url,
+ const char *proxyname,
+ apr_port_t proxyport)
+{
+ const char *proxy_func;
+ char *locurl = url, *u;
+ apr_size_t slen;
+ int is_ssl = 0;
+ apr_status_t status;
+ h2_proxy_ctx *ctx;
+ apr_uri_t uri;
+ int reconnected = 0;
+
+ /* find the scheme */
+ if ((url[0] != 'h' && url[0] != 'H') || url[1] != '2') {
+ return DECLINED;
+ }
+ u = strchr(url, ':');
+ if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0') {
+ return DECLINED;
+ }
+ slen = (u - url);
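+ /* scheme length 2 is "h2" (TLS), 3 is "h2c" (cleartext) */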
+ switch(slen) {
+ case 2:
+ proxy_func = "H2";
+ is_ssl = 1;
+ break;
+ case 3:
+ if (url[2] != 'c' && url[2] != 'C') {
+ return DECLINED;
+ }
+ proxy_func = "H2C";
+ break;
+ default:
+ return DECLINED;
+ }
+ ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->owner = r->connection;
+ ctx->pool = r->pool;
+ ctx->rbase = r;
+ ctx->server = r->server;
+ ctx->proxy_func = proxy_func;
+ ctx->is_ssl = is_ssl;
+ ctx->worker = worker;
+ ctx->conf = conf;
+ ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0;
+ ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
+ ctx->next = r;
+ r = NULL;
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
+
+ /* the scheme says this request is for us. */
+ apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase,
+ "H2: serving URL %s", url);
+
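+ /* Connect to the backend and run the HTTP/2 session; if the session
+ * fails while more requests are pending, the cleanup code below tears
+ * the connection down and jumps back here once (see 'reconnected'). */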
+run_connect:
+ /* Get a proxy_conn_rec from the worker, might be a new one, might
+ * be one still open from another request, or it might fail if the
+ * worker is stopped or in error. */
+ if ((status = ap_proxy_acquire_connection(ctx->proxy_func, &ctx->p_conn,
+ ctx->worker, ctx->server)) != OK) {
+ goto cleanup;
+ }
+
+ ctx->p_conn->is_ssl = ctx->is_ssl;
+ if (ctx->is_ssl && ctx->p_conn->connection) {
+ /* If there are some metadata on the connection (e.g. TLS alert),
+ * let mod_ssl detect them, and create a new connection below.
+ */
+ apr_bucket_brigade *tmp_bb;
+ tmp_bb = apr_brigade_create(ctx->rbase->pool,
+ ctx->rbase->connection->bucket_alloc);
+ status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb,
+ AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1);
+ if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
+ ctx->p_conn->close = 1;
+ }
+ apr_brigade_cleanup(tmp_bb);
+ }
+
+ /* Step One: Determine the URL to connect to (might be a proxy),
+ * initialize the backend accordingly and determine the server
+ * port string we can expect in responses. */
+ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker,
+ ctx->p_conn, &uri, &locurl,
+ proxyname, proxyport,
+ ctx->server_portstr,
+ sizeof(ctx->server_portstr))) != OK) {
+ goto cleanup;
+ }
+
+ /* If we are not already hosting an engine, try to push the request
+ * to an already existing engine or host a new engine here. */
+ if (!ctx->engine) {
+ ctx = push_request_somewhere(ctx);
+ if (ctx->r_status == SUSPENDED) {
+ /* request was pushed to another engine */
+ goto cleanup;
+ }
+ }
+
+ /* Step Two: Make the Connection (or check that an already existing
+ * socket is still usable). On success, we have a socket connected to
+ * backend->hostname. */
+ if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker,
+ ctx->server)) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, ctx->owner, APLOGNO(03352)
+ "H2: failed to make connection to backend: %s",
+ ctx->p_conn->hostname);
+ goto cleanup;
+ }
+
+ /* Step Three: Create conn_rec for the socket we have open now. */
+ if (!ctx->p_conn->connection) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
+ "setup new connection: is_ssl=%d %s %s %s",
+ ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
+ locurl, ctx->p_conn->hostname);
+ if ((status = ap_proxy_connection_create(ctx->proxy_func, ctx->p_conn,
+ ctx->owner,
+ ctx->server)) != OK) {
+ goto cleanup;
+ }
+
+ /*
+ * On SSL connections, set a note on the connection recording which
+ * CN is requested, so that mod_ssl can verify it if configured to
+ * do so.
+ */
+ if (ctx->p_conn->ssl_hostname) {
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-hostname", ctx->p_conn->ssl_hostname);
+ }
+
+ if (ctx->is_ssl) {
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-alpn-protos", "h2");
+ }
+ }
+
+run_session:
+ status = proxy_engine_run(ctx);
+ if (status == APR_SUCCESS) {
+ /* session and connection still ok */
+ if (next_request(ctx, 1) == APR_SUCCESS) {
+ /* more requests, run again */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376)
+ "run_session, again");
+ goto run_session;
+ }
+ /* done */
+ ctx->engine = NULL;
+ }
+
+cleanup:
+ if (!reconnected && ctx->engine && next_request(ctx, 1) == APR_SUCCESS) {
+ /* Still more to do, tear down old conn and start over */
+ if (ctx->p_conn) {
+ ctx->p_conn->close = 1;
+ /* only in trunk so far */
+ /*proxy_run_detach_backend(r, ctx->p_conn);*/
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+ reconnected = 1; /* we do this only once, then fail */
+ goto run_connect;
+ }
+
+ if (ctx->p_conn) {
+ if (status != APR_SUCCESS) {
+ /* close the socket when an error occurred or the session shut down (EOF) */
+ ctx->p_conn->close = 1;
+ }
+ /* only in trunk so far */
+ /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03377) "leaving handler");
+ return ctx->r_status;
+}
+
+static void register_hook(apr_pool_t *p)
+{
+ ap_hook_post_config(h2_proxy_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+
+ proxy_hook_scheme_handler(proxy_http2_handler, NULL, NULL, APR_HOOK_FIRST);
+ proxy_hook_canon_handler(proxy_http2_canon, NULL, NULL, APR_HOOK_FIRST);
+}
+
diff --git a/modules/http2/mod_proxy_http2.dep b/modules/http2/mod_proxy_http2.dep
new file mode 100644
index 00000000..641fca64
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.dep
@@ -0,0 +1,208 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_proxy_http2.mak
+
+./h2_proxy_session.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_provider.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\ap_slotmem.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_main.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_proxy.h"\
+ "..\..\include\mpm_common.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_charset.h"\
+ "..\..\include\util_ebcdic.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_mutex.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_date.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_md5.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_reslist.h"\
+ "..\..\srclib\apr-util\include\apr_strmatch.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apr_uuid.h"\
+ "..\..\srclib\apr-util\include\apr_xlate.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_fnmatch.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_session.h"\
+ ".\h2_proxy_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_proxy_util.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_util.h"\
+
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+./mod_proxy_http2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_provider.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\ap_slotmem.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_main.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_proxy.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_charset.h"\
+ "..\..\include\util_ebcdic.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_mutex.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_date.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_md5.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_reslist.h"\
+ "..\..\srclib\apr-util\include\apr_strmatch.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apr_uuid.h"\
+ "..\..\srclib\apr-util\include\apr_xlate.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_fnmatch.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_session.h"\
+ ".\h2_request.h"\
+ ".\h2_proxy_util.h"\
+ ".\h2_version.h"\
+ ".\mod_http2.h"\
+ ".\mod_proxy_http2.h"\
+
diff --git a/modules/http2/mod_proxy_http2.dsp b/modules/http2/mod_proxy_http2.dsp
new file mode 100644
index 00000000..5d6305fd
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.dsp
@@ -0,0 +1,119 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy_http2" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy_http2 - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_proxy_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_proxy_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy_http2 - Win32 Release"
+# Name "mod_proxy_http2 - Win32 Debug"
+# Begin Source File
+
+SOURCE=./h2_proxy_session.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_proxy_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=./mod_proxy_http2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http2/mod_h2.h b/modules/http2/mod_proxy_http2.h
index bb895dd2..7da84f0f 100644
--- a/modules/http2/mod_h2.h
+++ b/modules/http2/mod_proxy_http2.h
@@ -13,7 +13,8 @@
* limitations under the License.
*/
-#ifndef mod_h2_mod_h2_h
-#define mod_h2_mod_h2_h
+#ifndef __MOD_PROXY_HTTP2_H__
+#define __MOD_PROXY_HTTP2_H__
+
#endif
diff --git a/modules/http2/mod_proxy_http2.mak b/modules/http2/mod_proxy_http2.mak
new file mode 100644
index 00000000..e8e06241
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.mak
@@ -0,0 +1,427 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_proxy_http2.dsp
+!IF "$(CFG)" == ""
+CFG=mod_proxy_http2 - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_proxy_http2 - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_proxy_http2 - Win32 Release" && "$(CFG)" != "mod_proxy_http2 - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "mod_proxy - Win32 Release" "mod_http2 - Win32 Release" "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN" "mod_http2 - Win32 ReleaseCLEAN" "mod_proxy - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_proxy_session.obj"
+ -@erase "$(INTDIR)\h2_proxy_util.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.res"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.idb"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.exp"
+ -@erase "$(OUTDIR)\mod_proxy_http2.lib"
+ -@erase "$(OUTDIR)\mod_proxy_http2.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\h2_proxy_session.obj" \
+ "$(INTDIR)\h2_proxy_util.obj" \
+ "$(INTDIR)\mod_proxy_http2.obj" \
+ "$(INTDIR)\mod_proxy_http2.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib" \
+ "$(OUTDIR)\mod_http2.lib" \
+ "..\proxy\Release\mod_proxy.lib"
+
+"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so"
+ if exist .\Release\mod_proxy_http2.so.manifest mt.exe -manifest .\Release\mod_proxy_http2.so.manifest -outputresource:.\Release\mod_proxy_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "mod_proxy - Win32 Debug" "mod_http2 - Win32 Debug" "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN" "mod_http2 - Win32 DebugCLEAN" "mod_proxy - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_proxy_session.obj"
+ -@erase "$(INTDIR)\h2_proxy_util.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.res"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.idb"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.exp"
+ -@erase "$(OUTDIR)\mod_proxy_http2.lib"
+ -@erase "$(OUTDIR)\mod_proxy_http2.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+LINK32_OBJS= \
+ "$(INTDIR)\h2_proxy_session.obj" \
+ "$(INTDIR)\h2_proxy_util.obj" \
+ "$(INTDIR)\mod_proxy_http2.obj" \
+ "$(INTDIR)\mod_proxy_http2.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib" \
+ "$(OUTDIR)\mod_http2.lib" \
+ "..\proxy\Debug\mod_proxy.lib"
+
+"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so"
+ if exist .\Debug\mod_proxy_http2.so.manifest mt.exe -manifest .\Debug\mod_proxy_http2.so.manifest -outputresource:.\Debug\mod_proxy_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_proxy_http2.dep")
+!INCLUDE "mod_proxy_http2.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_proxy_http2.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" || "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"mod_http2 - Win32 Release" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release"
+ cd "."
+
+"mod_http2 - Win32 ReleaseCLEAN" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release" RECURSE=1 CLEAN
+ cd "."
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"mod_http2 - Win32 Debug" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug"
+ cd "."
+
+"mod_http2 - Win32 DebugCLEAN" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug" RECURSE=1 CLEAN
+ cd "."
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"mod_proxy - Win32 Release" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release"
+ cd "..\http2"
+
+"mod_proxy - Win32 ReleaseCLEAN" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release" RECURSE=1 CLEAN
+ cd "..\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"mod_proxy - Win32 Debug" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug"
+ cd "..\http2"
+
+"mod_proxy - Win32 DebugCLEAN" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\http2"
+
+!ENDIF
+
+SOURCE=./h2_proxy_session.c
+
+"$(INTDIR)\h2_proxy_session.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_proxy_util.c
+
+"$(INTDIR)\h2_proxy_util.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+
+"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "\Build11\httpd-2.4.21-dev-mph2\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+
+"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "\Build11\httpd-2.4.21-dev-mph2\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=./mod_proxy_http2.c
+
+"$(INTDIR)\mod_proxy_http2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+
diff --git a/modules/ldap/mod_ldap.mak b/modules/ldap/mod_ldap.mak
index 219f27dc..23ab7fea 100644
--- a/modules/ldap/mod_ldap.mak
+++ b/modules/ldap/mod_ldap.mak
@@ -339,14 +339,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_ldap.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ldap.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_ldap.so" /d LONG_NAME="ldap_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ldap.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_ldap.so" /d LONG_NAME="ldap_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_ldap - Win32 Debug"
"$(INTDIR)\mod_ldap.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ldap.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_ldap.so" /d LONG_NAME="ldap_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ldap.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_ldap.so" /d LONG_NAME="ldap_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/loggers/mod_log_config.mak b/modules/loggers/mod_log_config.mak
index ef8d6c47..df9ddf6a 100644
--- a/modules/loggers/mod_log_config.mak
+++ b/modules/loggers/mod_log_config.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_log_config.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_config.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_log_config.so" /d LONG_NAME="log_config_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_config.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_log_config.so" /d LONG_NAME="log_config_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_log_config - Win32 Debug"
"$(INTDIR)\mod_log_config.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_config.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_log_config.so" /d LONG_NAME="log_config_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_config.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_log_config.so" /d LONG_NAME="log_config_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/loggers/mod_log_debug.mak b/modules/loggers/mod_log_debug.mak
index 2fb2c9c3..94b0beea 100644
--- a/modules/loggers/mod_log_debug.mak
+++ b/modules/loggers/mod_log_debug.mak
@@ -303,14 +303,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_log_debug.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_debug.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_log_debug.so" /d LONG_NAME="log_debug_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_debug.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_log_debug.so" /d LONG_NAME="log_debug_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_log_debug - Win32 Debug"
"$(INTDIR)\mod_log_debug.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_debug.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_log_debug.so" /d LONG_NAME="log_debug_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_debug.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_log_debug.so" /d LONG_NAME="log_debug_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/loggers/mod_log_forensic.mak b/modules/loggers/mod_log_forensic.mak
index 5f5c4534..0ad2e544 100644
--- a/modules/loggers/mod_log_forensic.mak
+++ b/modules/loggers/mod_log_forensic.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_log_forensic.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_forensic.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_log_forensic.so" /d LONG_NAME="log_forensic_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_forensic.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_log_forensic.so" /d LONG_NAME="log_forensic_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_log_forensic - Win32 Debug"
"$(INTDIR)\mod_log_forensic.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_forensic.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_log_forensic.so" /d LONG_NAME="log_forensic_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_log_forensic.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_log_forensic.so" /d LONG_NAME="log_forensic_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/loggers/mod_logio.mak b/modules/loggers/mod_logio.mak
index e0def701..363e8488 100644
--- a/modules/loggers/mod_logio.mak
+++ b/modules/loggers/mod_logio.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_logio.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_logio.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_logio.so" /d LONG_NAME="logio_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_logio.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_logio.so" /d LONG_NAME="logio_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_logio - Win32 Debug"
"$(INTDIR)\mod_logio.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_logio.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_logio.so" /d LONG_NAME="logio_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_logio.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_logio.so" /d LONG_NAME="logio_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/lua/mod_lua.mak b/modules/lua/mod_lua.mak
index 37c53c7a..114d6bd3 100644
--- a/modules/lua/mod_lua.mak
+++ b/modules/lua/mod_lua.mak
@@ -355,14 +355,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_lua.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lua.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_lua.so" /d LONG_NAME="lua_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lua.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_lua.so" /d LONG_NAME="lua_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_lua - Win32 Debug"
"$(INTDIR)\mod_lua.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lua.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_lua.so" /d LONG_NAME="lua_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lua.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_lua.so" /d LONG_NAME="lua_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_actions.mak b/modules/mappers/mod_actions.mak
index 4bbed8ea..28ec1e0e 100644
--- a/modules/mappers/mod_actions.mak
+++ b/modules/mappers/mod_actions.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_actions.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_actions.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_actions.so" /d LONG_NAME="actions_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_actions.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_actions.so" /d LONG_NAME="actions_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_actions - Win32 Debug"
"$(INTDIR)\mod_actions.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_actions.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_actions.so" /d LONG_NAME="actions_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_actions.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_actions.so" /d LONG_NAME="actions_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_alias.mak b/modules/mappers/mod_alias.mak
index 9c8276ea..17ad406f 100644
--- a/modules/mappers/mod_alias.mak
+++ b/modules/mappers/mod_alias.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_alias.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_alias.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_alias.so" /d LONG_NAME="alias_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_alias.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_alias.so" /d LONG_NAME="alias_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_alias - Win32 Debug"
"$(INTDIR)\mod_alias.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_alias.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_alias.so" /d LONG_NAME="alias_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_alias.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_alias.so" /d LONG_NAME="alias_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_dir.mak b/modules/mappers/mod_dir.mak
index 19a17fe8..d490010f 100644
--- a/modules/mappers/mod_dir.mak
+++ b/modules/mappers/mod_dir.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_dir.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dir.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_dir.so" /d LONG_NAME="dir_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dir.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_dir.so" /d LONG_NAME="dir_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_dir - Win32 Debug"
"$(INTDIR)\mod_dir.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dir.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_dir.so" /d LONG_NAME="dir_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_dir.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_dir.so" /d LONG_NAME="dir_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_imagemap.mak b/modules/mappers/mod_imagemap.mak
index b78b5392..da50a61d 100644
--- a/modules/mappers/mod_imagemap.mak
+++ b/modules/mappers/mod_imagemap.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_imagemap.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_imagemap.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_imagemap.so" /d LONG_NAME="imagemap_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_imagemap.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_imagemap.so" /d LONG_NAME="imagemap_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_imagemap - Win32 Debug"
"$(INTDIR)\mod_imagemap.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_imagemap.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_imagemap.so" /d LONG_NAME="imagemap_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_imagemap.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_imagemap.so" /d LONG_NAME="imagemap_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_negotiation.mak b/modules/mappers/mod_negotiation.mak
index bff7e24d..6de4c00e 100644
--- a/modules/mappers/mod_negotiation.mak
+++ b/modules/mappers/mod_negotiation.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_negotiation.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_negotiation.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_negotiation.so" /d LONG_NAME="negotiation_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_negotiation.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_negotiation.so" /d LONG_NAME="negotiation_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_negotiation - Win32 Debug"
"$(INTDIR)\mod_negotiation.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_negotiation.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_negotiation.so" /d LONG_NAME="negotiation_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_negotiation.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_negotiation.so" /d LONG_NAME="negotiation_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c
index d31b1c24..7b2737c5 100644
--- a/modules/mappers/mod_rewrite.c
+++ b/modules/mappers/mod_rewrite.c
@@ -560,6 +560,14 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs)
*sqs = 1;
return 8;
}
+ else if (!strncasecmp(uri, "2://", 4)) { /* h2:// */
+ *sqs = 1;
+ return 5;
+ }
+ else if (!strncasecmp(uri, "2c://", 5)) { /* h2c:// */
+ *sqs = 1;
+ return 6;
+ }
break;
case 'l':
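
The two new branches above extend mod_rewrite's absolute-URI detection to the h2:// and h2c:// schemes: judging by the return values and the /* h2:// */ comments, the switch has apparently already consumed the leading 'h', so only the remainder is compared, *sqs is set to mark the scheme as supporting query strings, and the full prefix length (5 or 6) is returned. A minimal standalone sketch of the same idea, using a hypothetical helper and table that are not taken from the patch:

    #include <string.h>
    #include <strings.h>   /* strncasecmp */

    /* Hypothetical scheme table; the stored lengths play the role of the
     * literal return values (5 for "h2://", 6 for "h2c://") above. */
    static const struct { const char *prefix; int supports_qs; } schemes[] = {
        { "http://", 1 }, { "https://", 1 }, { "h2://", 1 }, { "h2c://", 1 },
        { NULL, 0 }
    };

    /* Return the scheme prefix length if uri is absolute, else 0. */
    static int scheme_prefix_len(const char *uri, int *supportsqs)
    {
        int i;
        for (i = 0; schemes[i].prefix; i++) {
            size_t len = strlen(schemes[i].prefix);
            if (!strncasecmp(uri, schemes[i].prefix, len)) {
                *supportsqs = schemes[i].supports_qs;
                return (int)len;
            }
        }
        return 0;
    }
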
diff --git a/modules/mappers/mod_rewrite.mak b/modules/mappers/mod_rewrite.mak
index d81c1650..3b08cabb 100644
--- a/modules/mappers/mod_rewrite.mak
+++ b/modules/mappers/mod_rewrite.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_rewrite.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_rewrite.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_rewrite.so" /d LONG_NAME="rewrite_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_rewrite.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_rewrite.so" /d LONG_NAME="rewrite_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_rewrite - Win32 Debug"
"$(INTDIR)\mod_rewrite.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_rewrite.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_rewrite.so" /d LONG_NAME="rewrite_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_rewrite.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_rewrite.so" /d LONG_NAME="rewrite_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_speling.mak b/modules/mappers/mod_speling.mak
index 9b329811..b49233f7 100644
--- a/modules/mappers/mod_speling.mak
+++ b/modules/mappers/mod_speling.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_speling.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_speling.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_speling.so" /d LONG_NAME="speling_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_speling.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_speling.so" /d LONG_NAME="speling_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_speling - Win32 Debug"
"$(INTDIR)\mod_speling.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_speling.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_speling.so" /d LONG_NAME="speling_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_speling.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_speling.so" /d LONG_NAME="speling_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_userdir.c b/modules/mappers/mod_userdir.c
index b181e278..1ec0e901 100644
--- a/modules/mappers/mod_userdir.c
+++ b/modules/mappers/mod_userdir.c
@@ -89,7 +89,7 @@ module AP_MODULE_DECLARE_DATA userdir_module;
typedef struct {
int globally_disabled;
- char *userdir;
+ const char *userdir;
apr_table_t *enabled_users;
apr_table_t *disabled_users;
} userdir_config;
@@ -137,7 +137,7 @@ static const char *set_user_dir(cmd_parms *cmd, void *dummy, const char *arg)
&userdir_module);
char *username;
const char *usernames = arg;
- char *kw = ap_getword_conf(cmd->pool, &usernames);
+ char *kw = ap_getword_conf(cmd->temp_pool, &usernames);
apr_table_t *usertable;
/* Since we are a raw argument, it is possible for us to be called with
@@ -173,7 +173,7 @@ static const char *set_user_dir(cmd_parms *cmd, void *dummy, const char *arg)
* If the first (only?) value isn't one of our keywords, just copy
* the string to the userdir string.
*/
- s_cfg->userdir = apr_pstrdup(cmd->pool, arg);
+ s_cfg->userdir = arg;
return NULL;
}
/*
@@ -182,7 +182,7 @@ static const char *set_user_dir(cmd_parms *cmd, void *dummy, const char *arg)
*/
while (*usernames) {
username = ap_getword_conf(cmd->pool, &usernames);
- apr_table_setn(usertable, username, kw);
+ apr_table_setn(usertable, username, "1");
}
return NULL;
}
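
In the mod_userdir.c hunk the per-user table now stores the constant string "1" instead of the keyword, and the keyword itself comes from cmd->temp_pool. That matters because apr_table_setn() keeps the caller's pointers rather than copying them, so only values with a sufficiently long lifetime (a literal, or memory from a long-lived pool) may be stored that way; the userdir string can likewise point at arg directly, presumably because the parsed directive arguments already live in a long-lived configuration pool, which is why the apr_pstrdup() copy was dropped. A small illustration of the apr_table_set()/apr_table_setn() difference (not from the patch):

    #include "apr_pools.h"
    #include "apr_tables.h"

    /* Illustrative only: apr_table_set() copies key and value into the
     * table's pool, while apr_table_setn() stores the given pointers as-is,
     * so the caller must guarantee they outlive the table. */
    static void table_demo(apr_pool_t *pool)
    {
        apr_table_t *t = apr_table_make(pool, 4);
        char buf[] = "transient value";

        apr_table_set(t, "copied", buf);     /* safe: duplicated into pool */
        apr_table_setn(t, "literal", "1");   /* safe: a literal never dies */
        /* apr_table_setn(t, "dangling", buf) would be unsafe once buf
         * goes out of scope. */
    }
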
diff --git a/modules/mappers/mod_userdir.mak b/modules/mappers/mod_userdir.mak
index bc8a5793..8087ce17 100644
--- a/modules/mappers/mod_userdir.mak
+++ b/modules/mappers/mod_userdir.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_userdir.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_userdir.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_userdir.so" /d LONG_NAME="userdir_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_userdir.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_userdir.so" /d LONG_NAME="userdir_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_userdir - Win32 Debug"
"$(INTDIR)\mod_userdir.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_userdir.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_userdir.so" /d LONG_NAME="userdir_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_userdir.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_userdir.so" /d LONG_NAME="userdir_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/mappers/mod_vhost_alias.mak b/modules/mappers/mod_vhost_alias.mak
index 7dc1767f..62085abe 100644
--- a/modules/mappers/mod_vhost_alias.mak
+++ b/modules/mappers/mod_vhost_alias.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_vhost_alias.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_vhost_alias.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_vhost_alias.so" /d LONG_NAME="vhost_alias_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_vhost_alias.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_vhost_alias.so" /d LONG_NAME="vhost_alias_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_vhost_alias - Win32 Debug"
"$(INTDIR)\mod_vhost_alias.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_vhost_alias.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_vhost_alias.so" /d LONG_NAME="vhost_alias_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_vhost_alias.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_vhost_alias.so" /d LONG_NAME="vhost_alias_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_cern_meta.mak b/modules/metadata/mod_cern_meta.mak
index 1440fdcd..08ba4146 100644
--- a/modules/metadata/mod_cern_meta.mak
+++ b/modules/metadata/mod_cern_meta.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_cern_meta.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cern_meta.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_cern_meta.so" /d LONG_NAME="cern_meta_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cern_meta.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_cern_meta.so" /d LONG_NAME="cern_meta_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_cern_meta - Win32 Debug"
"$(INTDIR)\mod_cern_meta.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cern_meta.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_cern_meta.so" /d LONG_NAME="cern_meta_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_cern_meta.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_cern_meta.so" /d LONG_NAME="cern_meta_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_env.mak b/modules/metadata/mod_env.mak
index 48bcde2d..62078307 100644
--- a/modules/metadata/mod_env.mak
+++ b/modules/metadata/mod_env.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_env.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_env.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_env.so" /d LONG_NAME="env_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_env.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_env.so" /d LONG_NAME="env_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_env - Win32 Debug"
"$(INTDIR)\mod_env.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_env.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_env.so" /d LONG_NAME="env_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_env.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_env.so" /d LONG_NAME="env_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_expires.mak b/modules/metadata/mod_expires.mak
index ff12a8eb..db62d315 100644
--- a/modules/metadata/mod_expires.mak
+++ b/modules/metadata/mod_expires.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_expires.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_expires.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_expires.so" /d LONG_NAME="expires_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_expires.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_expires.so" /d LONG_NAME="expires_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_expires - Win32 Debug"
"$(INTDIR)\mod_expires.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_expires.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_expires.so" /d LONG_NAME="expires_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_expires.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_expires.so" /d LONG_NAME="expires_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_headers.mak b/modules/metadata/mod_headers.mak
index 2f24aac1..7f9cddd8 100644
--- a/modules/metadata/mod_headers.mak
+++ b/modules/metadata/mod_headers.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_headers.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_headers.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_headers.so" /d LONG_NAME="headers_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_headers.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_headers.so" /d LONG_NAME="headers_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_headers - Win32 Debug"
"$(INTDIR)\mod_headers.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_headers.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_headers.so" /d LONG_NAME="headers_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_headers.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_headers.so" /d LONG_NAME="headers_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_ident.mak b/modules/metadata/mod_ident.mak
index 29f6dacd..cc45e211 100644
--- a/modules/metadata/mod_ident.mak
+++ b/modules/metadata/mod_ident.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_ident.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ident.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_ident.so" /d LONG_NAME="ident_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ident.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_ident.so" /d LONG_NAME="ident_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_ident - Win32 Debug"
"$(INTDIR)\mod_ident.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ident.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_ident.so" /d LONG_NAME="ident_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ident.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_ident.so" /d LONG_NAME="ident_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_mime_magic.mak b/modules/metadata/mod_mime_magic.mak
index 3f27e570..f4573f75 100644
--- a/modules/metadata/mod_mime_magic.mak
+++ b/modules/metadata/mod_mime_magic.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_mime_magic.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime_magic.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_mime_magic.so" /d LONG_NAME="mime_magic_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime_magic.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_mime_magic.so" /d LONG_NAME="mime_magic_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_mime_magic - Win32 Debug"
"$(INTDIR)\mod_mime_magic.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime_magic.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_mime_magic.so" /d LONG_NAME="mime_magic_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_mime_magic.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_mime_magic.so" /d LONG_NAME="mime_magic_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_remoteip.mak b/modules/metadata/mod_remoteip.mak
index de38e974..e6cacfb4 100644
--- a/modules/metadata/mod_remoteip.mak
+++ b/modules/metadata/mod_remoteip.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_remoteip.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_remoteip.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_remoteip.so" /d LONG_NAME="remoteip_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_remoteip.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_remoteip.so" /d LONG_NAME="remoteip_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_remoteip - Win32 Debug"
"$(INTDIR)\mod_remoteip.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_remoteip.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_remoteip.so" /d LONG_NAME="remoteip_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_remoteip.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_remoteip.so" /d LONG_NAME="remoteip_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_setenvif.mak b/modules/metadata/mod_setenvif.mak
index 74372856..8c748b67 100644
--- a/modules/metadata/mod_setenvif.mak
+++ b/modules/metadata/mod_setenvif.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_setenvif.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_setenvif.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_setenvif.so" /d LONG_NAME="setenvif_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_setenvif.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_setenvif.so" /d LONG_NAME="setenvif_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_setenvif - Win32 Debug"
"$(INTDIR)\mod_setenvif.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_setenvif.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_setenvif.so" /d LONG_NAME="setenvif_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_setenvif.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_setenvif.so" /d LONG_NAME="setenvif_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_unique_id.mak b/modules/metadata/mod_unique_id.mak
index 83ad0ba3..b10097bc 100644
--- a/modules/metadata/mod_unique_id.mak
+++ b/modules/metadata/mod_unique_id.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_unique_id.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_unique_id.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_unique_id.so" /d LONG_NAME="unique_id_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_unique_id.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_unique_id.so" /d LONG_NAME="unique_id_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_unique_id - Win32 Debug"
"$(INTDIR)\mod_unique_id.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_unique_id.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_unique_id.so" /d LONG_NAME="unique_id_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_unique_id.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_unique_id.so" /d LONG_NAME="unique_id_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_usertrack.mak b/modules/metadata/mod_usertrack.mak
index 8bf1a0c8..0912540f 100644
--- a/modules/metadata/mod_usertrack.mak
+++ b/modules/metadata/mod_usertrack.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_usertrack.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_usertrack.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_usertrack.so" /d LONG_NAME="usertrack_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_usertrack.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_usertrack.so" /d LONG_NAME="usertrack_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_usertrack - Win32 Debug"
"$(INTDIR)\mod_usertrack.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_usertrack.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_usertrack.so" /d LONG_NAME="usertrack_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_usertrack.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_usertrack.so" /d LONG_NAME="usertrack_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/metadata/mod_version.mak b/modules/metadata/mod_version.mak
index 02a218c7..a723251d 100644
--- a/modules/metadata/mod_version.mak
+++ b/modules/metadata/mod_version.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_version.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_version.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_version.so" /d LONG_NAME="version_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_version.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_version.so" /d LONG_NAME="version_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_version - Win32 Debug"
"$(INTDIR)\mod_version.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_version.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_version.so" /d LONG_NAME="version_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_version.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_version.so" /d LONG_NAME="version_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/NWGNUmakefile b/modules/proxy/NWGNUmakefile
index dce99d16..d44644f0 100644
--- a/modules/proxy/NWGNUmakefile
+++ b/modules/proxy/NWGNUmakefile
@@ -161,6 +161,7 @@ TARGET_nlm = \
$(OBJDIR)/proxyfcgi.nlm \
$(OBJDIR)/proxyscgi.nlm \
$(OBJDIR)/proxyexpress.nlm \
+ $(OBJDIR)/proxyhcheck.nlm \
$(OBJDIR)/proxylbm_busy.nlm \
$(OBJDIR)/proxylbm_hb.nlm \
$(OBJDIR)/proxylbm_req.nlm \
diff --git a/modules/proxy/balancers/config2.m4 b/modules/proxy/balancers/config2.m4
index f7232661..f6372815 100644
--- a/modules/proxy/balancers/config2.m4
+++ b/modules/proxy/balancers/config2.m4
@@ -1,8 +1,8 @@
APACHE_MODPATH_INIT(proxy/balancers)
-APACHE_MODULE(lbmethod_byrequests, Apache proxy Load balancing by request counting, , , $proxy_mods_enable)
-APACHE_MODULE(lbmethod_bytraffic, Apache proxy Load balancing by traffic counting, , , $proxy_mods_enable)
-APACHE_MODULE(lbmethod_bybusyness, Apache proxy Load balancing by busyness, , , $proxy_mods_enable)
-APACHE_MODULE(lbmethod_heartbeat, Apache proxy Load balancing from Heartbeats, , , $proxy_mods_enable)
+APACHE_MODULE(lbmethod_byrequests, Apache proxy Load balancing by request counting, , , $enable_proxy_balancer, , proxy_balancer)
+APACHE_MODULE(lbmethod_bytraffic, Apache proxy Load balancing by traffic counting, , , $enable_proxy_balancer, , proxy_balancer)
+APACHE_MODULE(lbmethod_bybusyness, Apache proxy Load balancing by busyness, , , $enable_proxy_balancer, , proxy_balancer)
+APACHE_MODULE(lbmethod_heartbeat, Apache proxy Load balancing from Heartbeats, , , $enable_proxy_balancer, , proxy_balancer)
APACHE_MODPATH_FINISH
diff --git a/modules/proxy/balancers/mod_lbmethod_bybusyness.mak b/modules/proxy/balancers/mod_lbmethod_bybusyness.mak
index d793c776..4a04fd68 100644
--- a/modules/proxy/balancers/mod_lbmethod_bybusyness.mak
+++ b/modules/proxy/balancers/mod_lbmethod_bybusyness.mak
@@ -391,14 +391,14 @@ SOURCE=..\..\..\build\win32\httpd.rc
"$(INTDIR)\mod_lbmethod_bybusyness.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_bybusyness.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_lbmethod_bybusyness.so" /d LONG_NAME="lbmethod_bybusyness_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_bybusyness.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_lbmethod_bybusyness.so" /d LONG_NAME="lbmethod_bybusyness_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_lbmethod_bybusyness - Win32 Debug"
"$(INTDIR)\mod_lbmethod_bybusyness.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_bybusyness.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_lbmethod_bybusyness.so" /d LONG_NAME="lbmethod_bybusyness_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_bybusyness.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_lbmethod_bybusyness.so" /d LONG_NAME="lbmethod_bybusyness_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/balancers/mod_lbmethod_byrequests.mak b/modules/proxy/balancers/mod_lbmethod_byrequests.mak
index f6c95ac5..b5914a21 100644
--- a/modules/proxy/balancers/mod_lbmethod_byrequests.mak
+++ b/modules/proxy/balancers/mod_lbmethod_byrequests.mak
@@ -391,14 +391,14 @@ SOURCE=..\..\..\build\win32\httpd.rc
"$(INTDIR)\mod_lbmethod_byrequests.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_byrequests.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_lbmethod_byrequests.so" /d LONG_NAME="lbmethod_byrequests_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_byrequests.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_lbmethod_byrequests.so" /d LONG_NAME="lbmethod_byrequests_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_lbmethod_byrequests - Win32 Debug"
"$(INTDIR)\mod_lbmethod_byrequests.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_byrequests.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_lbmethod_byrequests.so" /d LONG_NAME="lbmethod_byrequests_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_byrequests.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_lbmethod_byrequests.so" /d LONG_NAME="lbmethod_byrequests_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/balancers/mod_lbmethod_bytraffic.mak b/modules/proxy/balancers/mod_lbmethod_bytraffic.mak
index d3bd5e29..fe68c2bf 100644
--- a/modules/proxy/balancers/mod_lbmethod_bytraffic.mak
+++ b/modules/proxy/balancers/mod_lbmethod_bytraffic.mak
@@ -391,14 +391,14 @@ SOURCE=..\..\..\build\win32\httpd.rc
"$(INTDIR)\mod_lbmethod_bytraffic.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_bytraffic.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_lbmethod_bytraffic.so" /d LONG_NAME="lbmethod_bytraffic_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_bytraffic.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_lbmethod_bytraffic.so" /d LONG_NAME="lbmethod_bytraffic_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_lbmethod_bytraffic - Win32 Debug"
"$(INTDIR)\mod_lbmethod_bytraffic.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_bytraffic.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_lbmethod_bytraffic.so" /d LONG_NAME="lbmethod_bytraffic_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_bytraffic.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_lbmethod_bytraffic.so" /d LONG_NAME="lbmethod_bytraffic_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/balancers/mod_lbmethod_heartbeat.mak b/modules/proxy/balancers/mod_lbmethod_heartbeat.mak
index d699aaa8..31bd4af9 100644
--- a/modules/proxy/balancers/mod_lbmethod_heartbeat.mak
+++ b/modules/proxy/balancers/mod_lbmethod_heartbeat.mak
@@ -391,14 +391,14 @@ SOURCE=..\..\..\build\win32\httpd.rc
"$(INTDIR)\mod_lbmethod_heartbeat.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_heartbeat.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_lbmethod_heartbeat.so" /d LONG_NAME="lbmethod_heartbeat_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_heartbeat.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_lbmethod_heartbeat.so" /d LONG_NAME="lbmethod_heartbeat_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_lbmethod_heartbeat - Win32 Debug"
"$(INTDIR)\mod_lbmethod_heartbeat.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_heartbeat.res" /i "../../../include" /i "../../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_lbmethod_heartbeat.so" /d LONG_NAME="lbmethod_heartbeat_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_lbmethod_heartbeat.res" /i "../../../include" /i "../../../srclib/apr/include" /i "../../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_lbmethod_heartbeat.so" /d LONG_NAME="lbmethod_heartbeat_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/config.m4 b/modules/proxy/config.m4
index ce625910..ebb13f00 100644
--- a/modules/proxy/config.m4
+++ b/modules/proxy/config.m4
@@ -2,16 +2,22 @@ dnl modules enabled in this directory by default
APACHE_MODPATH_INIT(proxy)
-if test "$enable_proxy" = "shared"; then
- proxy_mods_enable=shared
-elif test "$enable_proxy" = "yes"; then
- proxy_mods_enable=yes
-else
- proxy_mods_enable=most
-fi
-
proxy_objs="mod_proxy.lo proxy_util.lo"
-APACHE_MODULE(proxy, Apache proxy module, $proxy_objs, , $proxy_mods_enable)
+APACHE_MODULE(proxy, Apache proxy module, $proxy_objs, , most)
+
+dnl set aside module selections and default, and set the module default to the
+dnl same scope (shared|static) as selected for mod proxy, along with setting
+dnl the default selection to "most" for remaining proxy modules, mirroring the
+dnl behavior of 2.4.1 and later, but failing ./configure only if an explicitly
+dnl enabled module is missing its prereqs
+save_module_selection=$module_selection
+save_module_default=$module_default
+if test "$enable_proxy" != "no"; then
+ module_selection=most
+ if test "$enable_proxy" = "shared" -o "$enable_proxy" = "static"; then
+ module_default=$enable_proxy
+ fi
+fi
proxy_connect_objs="mod_proxy_connect.lo"
proxy_ftp_objs="mod_proxy_ftp.lo"
@@ -39,11 +45,11 @@ case "$host" in
;;
esac
-APACHE_MODULE(proxy_connect, Apache proxy CONNECT module. Requires and is enabled by --enable-proxy., $proxy_connect_objs, , $proxy_mods_enable,, proxy)
-APACHE_MODULE(proxy_ftp, Apache proxy FTP module. Requires and is enabled by --enable-proxy., $proxy_ftp_objs, , $proxy_mods_enable,, proxy)
-APACHE_MODULE(proxy_http, Apache proxy HTTP module. Requires and is enabled by --enable-proxy., $proxy_http_objs, , $proxy_mods_enable,, proxy)
-APACHE_MODULE(proxy_fcgi, Apache proxy FastCGI module. Requires and is enabled by --enable-proxy., $proxy_fcgi_objs, , $proxy_mods_enable,, proxy)
-APACHE_MODULE(proxy_scgi, Apache proxy SCGI module. Requires and is enabled by --enable-proxy., $proxy_scgi_objs, , $proxy_mods_enable,, proxy)
+APACHE_MODULE(proxy_connect, Apache proxy CONNECT module. Requires --enable-proxy., $proxy_connect_objs, , most, , proxy)
+APACHE_MODULE(proxy_ftp, Apache proxy FTP module. Requires --enable-proxy., $proxy_ftp_objs, , most, , proxy)
+APACHE_MODULE(proxy_http, Apache proxy HTTP module. Requires --enable-proxy., $proxy_http_objs, , most, , proxy)
+APACHE_MODULE(proxy_fcgi, Apache proxy FastCGI module. Requires --enable-proxy., $proxy_fcgi_objs, , most, , proxy)
+APACHE_MODULE(proxy_scgi, Apache proxy SCGI module. Requires --enable-proxy., $proxy_scgi_objs, , most, , proxy)
APACHE_MODULE(proxy_fdpass, Apache proxy to Unix Daemon Socket module. Requires --enable-proxy., $proxy_fdpass_objs, , , [
AC_CHECK_DECL(CMSG_DATA,,, [
#include <sys/types.h>
@@ -54,13 +60,17 @@ APACHE_MODULE(proxy_fdpass, Apache proxy to Unix Daemon Socket module. Requires
enable_proxy_fdpass=no
fi
],proxy)
-APACHE_MODULE(proxy_wstunnel, Apache proxy Websocket Tunnel module. Requires and is enabled by --enable-proxy., $proxy_wstunnel_objs, , $proxy_mods_enable,, proxy)
-APACHE_MODULE(proxy_ajp, Apache proxy AJP module. Requires and is enabled by --enable-proxy., $proxy_ajp_objs, , $proxy_mods_enable,, proxy)
-APACHE_MODULE(proxy_balancer, Apache proxy BALANCER module. Requires and is enabled by --enable-proxy., $proxy_balancer_objs, , $proxy_mods_enable,, proxy)
+APACHE_MODULE(proxy_wstunnel, Apache proxy Websocket Tunnel module. Requires --enable-proxy., $proxy_wstunnel_objs, , most, , proxy)
+APACHE_MODULE(proxy_ajp, Apache proxy AJP module. Requires --enable-proxy., $proxy_ajp_objs, , most, , proxy)
+APACHE_MODULE(proxy_balancer, Apache proxy BALANCER module. Requires --enable-proxy., $proxy_balancer_objs, , most, , proxy)
-APACHE_MODULE(proxy_express, mass reverse-proxy module. Requires --enable-proxy., , , $proxy_mods_enable,, proxy)
+APACHE_MODULE(proxy_express, mass reverse-proxy module. Requires --enable-proxy., , , most, , proxy)
+APACHE_MODULE(proxy_hcheck, [reverse-proxy health-check module. Requires --enable-proxy and --enable-watchdog.], , , most, , [proxy,watchdog])
APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
+module_selection=$save_module_selection
+module_default=$save_module_default
+
APACHE_MODPATH_FINISH
diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index 9a584333..cdcda4f3 100644
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -36,6 +36,40 @@ APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup,
#define MAX(x,y) ((x) >= (y) ? (x) : (y))
#endif
+/*
+ * We do health-checks only if that (sub)module is loaded in. This
+ * allows for us to continue as is w/o requiring mod_watchdog for
+ * those implementations which aren't using health checks
+ */
+static APR_OPTIONAL_FN_TYPE(set_worker_hc_param) *set_worker_hc_param_f = NULL;
+
+/* Externals */
+proxy_hcmethods_t PROXY_DECLARE_DATA proxy_hcmethods[] = {
+ {NONE, "NONE", 1},
+ {TCP, "TCP", 1},
+ {OPTIONS, "OPTIONS", 1},
+ {HEAD, "HEAD", 1},
+ {GET, "GET", 1},
+ {CPING, "CPING", 0},
+ {PROVIDER, "PROVIDER", 0},
+ {EOT, NULL, 1}
+};
+
+proxy_wstat_t PROXY_DECLARE_DATA proxy_wstat_tbl[] = {
+ {PROXY_WORKER_INITIALIZED, PROXY_WORKER_INITIALIZED_FLAG, "Init "},
+ {PROXY_WORKER_IGNORE_ERRORS, PROXY_WORKER_IGNORE_ERRORS_FLAG, "Ign "},
+ {PROXY_WORKER_DRAIN, PROXY_WORKER_DRAIN_FLAG, "Drn "},
+ {PROXY_WORKER_GENERIC, PROXY_WORKER_GENERIC_FLAG, "Gen "},
+ {PROXY_WORKER_IN_SHUTDOWN, PROXY_WORKER_IN_SHUTDOWN_FLAG, "Shut "},
+ {PROXY_WORKER_DISABLED, PROXY_WORKER_DISABLED_FLAG, "Dis "},
+ {PROXY_WORKER_STOPPED, PROXY_WORKER_STOPPED_FLAG, "Stop "},
+ {PROXY_WORKER_IN_ERROR, PROXY_WORKER_IN_ERROR_FLAG, "Err "},
+ {PROXY_WORKER_HOT_STANDBY, PROXY_WORKER_HOT_STANDBY_FLAG, "Stby "},
+ {PROXY_WORKER_FREE, PROXY_WORKER_FREE_FLAG, "Free "},
+ {PROXY_WORKER_HC_FAIL, PROXY_WORKER_HC_FAIL_FLAG, "HcFl "},
+ {0x0, '\0', NULL}
+};
+
static const char * const proxy_id = "proxy";
apr_global_mutex_t *proxy_mutex = NULL;
@@ -56,6 +90,7 @@ apr_global_mutex_t *proxy_mutex = NULL;
/* Translate the URL into a 'filename' */
static const char *set_worker_param(apr_pool_t *p,
+ server_rec *s,
proxy_worker *worker,
const char *key,
const char *val)
@@ -274,7 +309,11 @@ static const char *set_worker_param(apr_pool_t *p,
PROXY_STRNCPY(worker->s->flusher, val);
}
else {
- return "unknown Worker parameter";
+ if (set_worker_hc_param_f) {
+ return set_worker_hc_param_f(p, s, worker, key, val, NULL);
+ } else {
+ return "unknown Worker parameter";
+ }
}
return NULL;
}
@@ -1170,7 +1209,8 @@ static int proxy_handler(request_rec *r)
* We can not failover to another worker.
* Mark the worker as unusable if member of load balancer
*/
- if (balancer) {
+ if (balancer
+ && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
worker->s->status |= PROXY_WORKER_IN_ERROR;
worker->s->error_time = apr_time_now();
}
@@ -1181,7 +1221,8 @@ static int proxy_handler(request_rec *r)
* We can failover to another worker
* Mark the worker as unusable if member of load balancer
*/
- if (balancer) {
+ if (balancer
+ && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
worker->s->status |= PROXY_WORKER_IN_ERROR;
worker->s->error_time = apr_time_now();
}
@@ -1675,7 +1716,7 @@ static const char *
"Ignoring parameter '%s=%s' for worker '%s' because of worker sharing",
elts[i].key, elts[i].val, ap_proxy_worker_name(cmd->pool, worker));
} else {
- const char *err = set_worker_param(cmd->pool, worker, elts[i].key,
+ const char *err = set_worker_param(cmd->pool, s, worker, elts[i].key,
elts[i].val);
if (err)
return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL);
@@ -2159,7 +2200,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg)
"Ignoring parameter '%s=%s' for worker '%s' because of worker sharing",
elts[i].key, elts[i].val, ap_proxy_worker_name(cmd->pool, worker));
} else {
- err = set_worker_param(cmd->pool, worker, elts[i].key,
+ err = set_worker_param(cmd->pool, cmd->server, worker, elts[i].key,
elts[i].val);
if (err)
return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
@@ -2244,7 +2285,7 @@ static const char *
else
*val++ = '\0';
if (worker)
- err = set_worker_param(cmd->pool, worker, word, val);
+ err = set_worker_param(cmd->pool, cmd->server, worker, word, val);
else
err = set_balancer_param(conf, cmd->pool, balancer, word, val);
@@ -2383,7 +2424,7 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg)
else
*val++ = '\0';
if (worker)
- err = set_worker_param(cmd->pool, worker, word, val);
+ err = set_worker_param(cmd->pool, cmd->server, worker, word, val);
else
err = set_balancer_param(sconf, cmd->pool, balancer,
word, val);
@@ -2745,6 +2786,7 @@ static int proxy_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
APR_HOOK_MIDDLE);
/* Reset workers count on gracefull restart */
proxy_lb_workers = 0;
+ set_worker_hc_param_f = APR_RETRIEVE_OPTIONAL_FN(set_worker_hc_param);
return OK;
}
static void register_hooks(apr_pool_t *p)
@@ -2757,8 +2799,8 @@ static void register_hooks(apr_pool_t *p)
* make sure that we are called after the mpm
* initializes.
*/
- static const char *const aszPred[] = { "mpm_winnt.c", "mod_proxy_balancer.c", NULL};
-
+ static const char *const aszPred[] = { "mpm_winnt.c", "mod_proxy_balancer.c",
+ "mod_proxy_hcheck.c", NULL};
/* handler */
ap_hook_handler(proxy_handler, NULL, NULL, APR_HOOK_FIRST);
/* filename-to-URI translation */
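
mod_proxy now resolves set_worker_hc_param at pre-config time with APR_RETRIEVE_OPTIONAL_FN and falls back to the old "unknown Worker parameter" error when the pointer is NULL, so the health-check submodule (and its mod_watchdog dependency) stays strictly optional. The general APR optional-function pattern looks roughly like the sketch below; the names (extra_worker_param and friends) are invented for illustration, and the real signature is only the one implied by the call set_worker_hc_param_f(p, s, worker, key, val, NULL) above:

    #include "httpd.h"
    #include "apr_pools.h"
    #include "apr_optional.h"

    /* Shared declaration: creates the APR_OPTIONAL_FN_TYPE() typedef. */
    APR_DECLARE_OPTIONAL_FN(const char *, extra_worker_param,
                            (apr_pool_t *p, const char *key, const char *val));

    /* Provider side (normally a separate module): the implementation must
     * carry the registered name. */
    static const char *extra_worker_param(apr_pool_t *p, const char *key,
                                          const char *val)
    {
        return NULL; /* accept everything in this sketch */
    }

    static void provider_register_hooks(apr_pool_t *p)
    {
        APR_REGISTER_OPTIONAL_FN(extra_worker_param);
    }

    /* Consumer side: look the function up once, keep working if absent. */
    static APR_OPTIONAL_FN_TYPE(extra_worker_param) *extra_fn = NULL;

    static int consumer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
                                   apr_pool_t *ptemp)
    {
        extra_fn = APR_RETRIEVE_OPTIONAL_FN(extra_worker_param);
        return OK;
    }

    static const char *handle_param(apr_pool_t *p, const char *key,
                                    const char *val)
    {
        return extra_fn ? extra_fn(p, key, val)
                        : "unknown Worker parameter";
    }
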
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 72dab333..f1413c56 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -75,6 +75,22 @@ enum enctype {
enc_path, enc_search, enc_user, enc_fpath, enc_parm
};
+typedef enum {
+ NONE, TCP, OPTIONS, HEAD, GET, CPING, PROVIDER, EOT
+} hcmethod_t;
+
+typedef struct {
+ hcmethod_t method;
+ char *name;
+ int implemented;
+} proxy_hcmethods_t;
+
+typedef struct {
+ unsigned int bit;
+ char flag;
+ const char *name;
+} proxy_wstat_t;
+
#define BALANCER_PREFIX "balancer://"
#if APR_CHARSET_EBCDIC
@@ -139,7 +155,7 @@ typedef struct {
proxy_worker *reverse; /* reverse "module-driven" proxy worker */
const char *domain; /* domain name to use in absence of a domain name in the request */
const char *id;
- apr_pool_t *pool; /* Pool used for allocating this struct */
+ apr_pool_t *pool; /* Pool used for allocating this struct's elements */
int req; /* true if proxy requests are enabled */
int max_balancers; /* maximum number of allowed balancers */
int bgrowth; /* number of post-config balancers that can be added */
@@ -270,8 +286,11 @@ struct proxy_conn_pool {
proxy_conn_rec *conn; /* Single connection for prefork mpm */
};
-/* Keep below in sync with proxy_util.c! */
/* worker status bits */
+/*
+ * NOTE: Keep up-to-date w/ proxy_wstat_tbl[]
+ * in mod_proxy.c !
+ */
#define PROXY_WORKER_INITIALIZED 0x0001
#define PROXY_WORKER_IGNORE_ERRORS 0x0002
#define PROXY_WORKER_DRAIN 0x0004
@@ -282,6 +301,7 @@ struct proxy_conn_pool {
#define PROXY_WORKER_IN_ERROR 0x0080
#define PROXY_WORKER_HOT_STANDBY 0x0100
#define PROXY_WORKER_FREE 0x0200
+#define PROXY_WORKER_HC_FAIL 0x0400
/* worker status flags */
#define PROXY_WORKER_INITIALIZED_FLAG 'O'
@@ -294,9 +314,11 @@ struct proxy_conn_pool {
#define PROXY_WORKER_IN_ERROR_FLAG 'E'
#define PROXY_WORKER_HOT_STANDBY_FLAG 'H'
#define PROXY_WORKER_FREE_FLAG 'F'
+#define PROXY_WORKER_HC_FAIL_FLAG 'C'
#define PROXY_WORKER_NOT_USABLE_BITMAP ( PROXY_WORKER_IN_SHUTDOWN | \
-PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR )
+PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR | \
+PROXY_WORKER_HC_FAIL )
/* NOTE: these check the shared status */
#define PROXY_WORKER_IS_INITIALIZED(f) ( (f)->s->status & PROXY_WORKER_INITIALIZED )
@@ -310,6 +332,10 @@ PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR )
#define PROXY_WORKER_IS_GENERIC(f) ( (f)->s->status & PROXY_WORKER_GENERIC )
+#define PROXY_WORKER_IS_HCFAILED(f) ( (f)->s->status & PROXY_WORKER_HC_FAIL )
+
+#define PROXY_WORKER_IS(f, b) ( (f)->s->status & (b) )
+
/* default worker retry timeout in seconds */
#define PROXY_WORKER_DEFAULT_RETRY 60
@@ -349,6 +375,7 @@ typedef struct {
} proxy_hashes ;
/* Runtime worker status information. Shared in the scoreboard */
+/* The addition of member uds_path in 2.4.7 was an incompatible API change. */
typedef struct {
char name[PROXY_WORKER_MAX_NAME_SIZE];
char scheme[PROXY_WORKER_MAX_SCHEME_SIZE]; /* scheme to use ajp|http|https */
@@ -403,6 +430,14 @@ typedef struct {
unsigned int keepalive_set:1;
unsigned int disablereuse_set:1;
unsigned int was_malloced:1;
+ char hcuri[PROXY_WORKER_MAX_ROUTE_SIZE]; /* health check uri */
+ char hcexpr[PROXY_WORKER_MAX_SCHEME_SIZE]; /* name of condition expr for health check */
+ int passes; /* number of successes for check to pass */
+ int pcount; /* current count of passes */
+ int fails; /* number of failures for check to fail */
+ int fcount; /* current count of failures */
+ hcmethod_t method; /* method to use for health check */
+ apr_interval_time_t interval; /* time between health checks */
} proxy_worker_shared;
#define ALIGNED_PROXY_WORKER_SHARED_SIZE (APR_ALIGN_DEFAULT(sizeof(proxy_worker_shared)))
@@ -418,6 +453,11 @@ struct proxy_worker {
void *context; /* general purpose storage */
};
+/* default to health check every 30 seconds */
+#define HCHECK_WATHCHDOG_DEFAULT_INTERVAL (30)
+/* The watchdog runs every 2 seconds, which is also the minimum allowed check interval */
+#define HCHECK_WATHCHDOG_INTERVAL (2)
+
/*
* Time to wait (in microseconds) to find out if more data is currently
* available at the backend.
@@ -508,6 +548,26 @@ struct proxy_balancer_method {
#define PROXY_DECLARE_DATA __declspec(dllimport)
#endif
+/* Using PROXY_DECLARE_OPTIONAL_HOOK instead of
+ * APR_DECLARE_EXTERNAL_HOOK allows build/make_nw_export.awk
+ * to distinguish between hooks that implement
+ * proxy_hook_xx and proxy_hook_get_xx in mod_proxy.c and
+ * those which don't.
+ */
+#define PROXY_DECLARE_OPTIONAL_HOOK APR_DECLARE_EXTERNAL_HOOK
+
+/* These 2 are in mod_proxy.c */
+extern PROXY_DECLARE_DATA proxy_hcmethods_t proxy_hcmethods[];
+extern PROXY_DECLARE_DATA proxy_wstat_t proxy_wstat_tbl[];
+
+/* Following 4 from health check */
+APR_DECLARE_OPTIONAL_FN(void, hc_show_exprs, (request_rec *));
+APR_DECLARE_OPTIONAL_FN(void, hc_select_exprs, (request_rec *, const char *));
+APR_DECLARE_OPTIONAL_FN(int, hc_valid_expr, (request_rec *, const char *));
+APR_DECLARE_OPTIONAL_FN(const char *, set_worker_hc_param,
+ (apr_pool_t *, server_rec *, proxy_worker *,
+ const char *, const char *, void *));
+
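+/* Consumers retrieve these at pre_config time, as mod_proxy.c and
+ * mod_proxy_balancer.c do in this patch, e.g.:
+ *   set_worker_hc_param_f = APR_RETRIEVE_OPTIONAL_FN(set_worker_hc_param);
+ * and must cope with a NULL result when mod_proxy_hcheck is not loaded.
+ */
+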
APR_DECLARE_EXTERNAL_HOOK(proxy, PROXY, int, scheme_handler, (request_rec *r,
proxy_worker *worker, proxy_server_conf *conf, char *url,
const char *proxyhost, apr_port_t proxyport))
@@ -1019,6 +1079,12 @@ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
APR_DECLARE_OPTIONAL_FN(int, ap_proxy_clear_connection,
(request_rec *r, apr_table_t *headers));
+/**
+ * @param socket socket to test
+ * @return TRUE if socket is connected/active
+ */
+PROXY_DECLARE(int) ap_proxy_is_socket_connected(apr_socket_t *socket);
+
#define PROXY_LBMETHOD "proxylbmethod"
/* The number of dynamic workers that can be added when reconfiguring.
@@ -1040,6 +1106,13 @@ int ap_proxy_lb_workers(void);
PROXY_DECLARE(apr_port_t) ap_proxy_port_of_scheme(const char *scheme);
/**
+ * Return the name of the health check method (eg: "OPTIONS").
+ * @param method method enum
+ * @return name of method
+ */
+PROXY_DECLARE (const char *) ap_proxy_show_hcmethod(hcmethod_t method);
+
+/**
* Strip a unix domain socket (UDS) prefix from the input URL
* @param p pool to allocate result from
* @param url a URL potentially prefixed with a UDS path
diff --git a/modules/proxy/mod_proxy.mak b/modules/proxy/mod_proxy.mak
index 53a9df6d..98737d63 100644
--- a/modules/proxy/mod_proxy.mak
+++ b/modules/proxy/mod_proxy.mak
@@ -344,14 +344,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy.so" /d LONG_NAME="proxy_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy.so" /d LONG_NAME="proxy_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy - Win32 Debug"
"$(INTDIR)\mod_proxy.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy.so" /d LONG_NAME="proxy_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy.so" /d LONG_NAME="proxy_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_ajp.mak b/modules/proxy/mod_proxy_ajp.mak
index 30e2a110..b14a569a 100644
--- a/modules/proxy/mod_proxy_ajp.mak
+++ b/modules/proxy/mod_proxy_ajp.mak
@@ -399,14 +399,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_ajp.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_ajp.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_ajp.so" /d LONG_NAME="proxy_ajp_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_ajp.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_ajp.so" /d LONG_NAME="proxy_ajp_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_ajp - Win32 Debug"
"$(INTDIR)\mod_proxy_ajp.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_ajp.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_ajp.so" /d LONG_NAME="proxy_ajp_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_ajp.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_ajp.so" /d LONG_NAME="proxy_ajp_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index 702650a9..69ad5dce 100644
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -28,9 +28,16 @@ ap_slotmem_provider_t *storage = NULL;
module AP_MODULE_DECLARE_DATA proxy_balancer_module;
+static APR_OPTIONAL_FN_TYPE(set_worker_hc_param) *set_worker_hc_param_f = NULL;
+
static int (*ap_proxy_retry_worker_fn)(const char *proxy_function,
proxy_worker *worker, server_rec *s) = NULL;
+static APR_OPTIONAL_FN_TYPE(hc_show_exprs) *hc_show_exprs_f = NULL;
+static APR_OPTIONAL_FN_TYPE(hc_select_exprs) *hc_select_exprs_f = NULL;
+static APR_OPTIONAL_FN_TYPE(hc_valid_expr) *hc_valid_expr_f = NULL;
+
+
/*
* Register our mutex type before the config is read so we
* can adjust the mutex settings using the Mutex directive.
@@ -46,7 +53,10 @@ static int balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
if (rv != APR_SUCCESS) {
return rv;
}
-
+ set_worker_hc_param_f = APR_RETRIEVE_OPTIONAL_FN(set_worker_hc_param);
+ hc_show_exprs_f = APR_RETRIEVE_OPTIONAL_FN(hc_show_exprs);
+ hc_select_exprs_f = APR_RETRIEVE_OPTIONAL_FN(hc_select_exprs);
+ hc_valid_expr_f = APR_RETRIEVE_OPTIONAL_FN(hc_valid_expr);
return OK;
}
@@ -633,7 +643,8 @@ static int proxy_balancer_post_request(proxy_worker *worker,
return HTTP_INTERNAL_SERVER_ERROR;
}
- if (!apr_is_empty_array(balancer->errstatuses)) {
+ if (!apr_is_empty_array(balancer->errstatuses)
+ && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
int i;
for (i = 0; i < balancer->errstatuses->nelts; i++) {
int val = ((int *)balancer->errstatuses->elts)[i];
@@ -652,6 +663,7 @@ static int proxy_balancer_post_request(proxy_worker *worker,
}
if (balancer->failontimeout
+ && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)
&& (apr_table_get(r->notes, "proxy_timedout")) != NULL) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02460)
"%s: Forcing worker (%s) into error state "
@@ -920,10 +932,10 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
static void create_radio(const char *name, unsigned int flag, request_rec *r)
{
- ap_rvputs(r, "<td>On <input name='", name, "' id='", name, "' value='1' type=radio", NULL);
+ ap_rvputs(r, "<td><label for='", name, "1'>On</label> <input name='", name, "' id='", name, "1' value='1' type=radio", NULL);
if (flag)
ap_rputs(" checked", r);
- ap_rvputs(r, "> <br/> Off <input name='", name, "' id='", name, "' value='0' type=radio", NULL);
+ ap_rvputs(r, "> <br/> <label for='", name, "0'>Off</label> <input name='", name, "' id='", name, "0' value='0' type=radio", NULL);
if (!flag)
ap_rputs(" checked", r);
ap_rputs("></td>\n", r);
@@ -1093,17 +1105,27 @@ static int balancer_handler(request_rec *r)
else
*wsel->s->redirect = '\0';
}
+ /*
+ * TODO: Look for all 'w_status_#' keys and then loop thru
+ * on that # character, since the character == the flag
+ */
if ((val = apr_table_get(params, "w_status_I"))) {
- ap_proxy_set_wstatus('I', atoi(val), wsel);
+ ap_proxy_set_wstatus(PROXY_WORKER_IGNORE_ERRORS_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_N"))) {
- ap_proxy_set_wstatus('N', atoi(val), wsel);
+ ap_proxy_set_wstatus(PROXY_WORKER_DRAIN_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_D"))) {
- ap_proxy_set_wstatus('D', atoi(val), wsel);
+ ap_proxy_set_wstatus(PROXY_WORKER_DISABLED_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_H"))) {
- ap_proxy_set_wstatus('H', atoi(val), wsel);
+ ap_proxy_set_wstatus(PROXY_WORKER_HOT_STANDBY_FLAG, atoi(val), wsel);
+ }
+ if ((val = apr_table_get(params, "w_status_S"))) {
+ ap_proxy_set_wstatus(PROXY_WORKER_STOPPED_FLAG, atoi(val), wsel);
+ }
+ if ((val = apr_table_get(params, "w_status_C"))) {
+ ap_proxy_set_wstatus(PROXY_WORKER_HC_FAIL_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_ls"))) {
int ival = atoi(val);
@@ -1111,6 +1133,47 @@ static int balancer_handler(request_rec *r)
wsel->s->lbset = ival;
}
}
+ if ((val = apr_table_get(params, "w_hi"))) {
+ int ival = atoi(val);
+ if (ival >= HCHECK_WATHCHDOG_INTERVAL) {
+ wsel->s->interval = apr_time_from_sec(ival);
+ }
+ }
+ if ((val = apr_table_get(params, "w_hp"))) {
+ int ival = atoi(val);
+ if (ival >= 1) {
+ wsel->s->passes = ival;
+ }
+ }
+ if ((val = apr_table_get(params, "w_hf"))) {
+ int ival = atoi(val);
+ if (ival >= 1) {
+ wsel->s->fails = ival;
+ }
+ }
+ if ((val = apr_table_get(params, "w_hm"))) {
+ proxy_hcmethods_t *method = proxy_hcmethods;
+ for (; method->name; method++) {
+ if (!strcasecmp(method->name, val) && method->implemented)
+ wsel->s->method = method->method;
+ }
+ }
+ if ((val = apr_table_get(params, "w_hu"))) {
+ if (strlen(val) && strlen(val) < sizeof(wsel->s->hcuri))
+ strcpy(wsel->s->hcuri, val);
+ else
+ *wsel->s->hcuri = '\0';
+ }
+ if (hc_valid_expr_f && (val = apr_table_get(params, "w_he"))) {
+ if (strlen(val) && hc_valid_expr_f(r, val) && strlen(val) < sizeof(wsel->s->hcexpr))
+ strcpy(wsel->s->hcexpr, val);
+ else
+ *wsel->s->hcexpr = '\0';
+ }
+ /* If the health check method doesn't support an expr, then null it */
+ if (wsel->s->method == NONE || wsel->s->method == TCP) {
+ *wsel->s->hcexpr = '\0';
+ }
/* if enabling, we need to reset all lb params */
if (bsel && !was_usable && PROXY_WORKER_IS_USABLE(wsel)) {
bsel->s->need_reset = 1;
@@ -1228,7 +1291,7 @@ static int balancer_handler(request_rec *r)
/* sync all timestamps */
bsel->wupdated = bsel->s->wupdated = nworker->s->updated = apr_time_now();
/* by default, all new workers are disabled */
- ap_proxy_set_wstatus('D', 1, nworker);
+ ap_proxy_set_wstatus(PROXY_WORKER_DISABLED_FLAG, 1, nworker);
}
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
@@ -1331,18 +1394,7 @@ static int balancer_handler(request_rec *r)
}
/* Begin proxy_worker_stat */
ap_rputs(" <httpd:status>", r);
- if (worker->s->status & PROXY_WORKER_DISABLED)
- ap_rputs("Disabled", r);
- else if (worker->s->status & PROXY_WORKER_IN_ERROR)
- ap_rputs("Error", r);
- else if (worker->s->status & PROXY_WORKER_STOPPED)
- ap_rputs("Stopped", r);
- else if (worker->s->status & PROXY_WORKER_HOT_STANDBY)
- ap_rputs("Standby", r);
- else if (PROXY_WORKER_IS_USABLE(worker))
- ap_rputs("OK", r);
- else if (!PROXY_WORKER_IS_INITIALIZED(worker))
- ap_rputs("Uninitialized", r);
+ ap_rputs(ap_proxy_parse_wstatus(r->pool, worker), r);
ap_rputs("</httpd:status>\n", r);
if ((worker->s->error_time > 0) && apr_rfc822_date(date, worker->s->error_time) == APR_SUCCESS) {
ap_rvputs(r, " <httpd:error_time>", date,
@@ -1447,7 +1499,7 @@ static int balancer_handler(request_rec *r)
" padding: 2px;\n"
" border-style: dotted;\n"
" border-color: gray;\n"
- " background-color: white;\n"
+ " background-color: lightgray;\n"
" text-align: center;\n"
"}\n"
"td {\n"
@@ -1477,10 +1529,10 @@ static int balancer_handler(request_rec *r)
for (i = 0; i < conf->balancers->nelts; i++) {
ap_rputs("<hr />\n<h3>LoadBalancer Status for ", r);
- ap_rvputs(r, "<a href=\"", ap_escape_uri(r->pool, r->uri), "?b=",
+ ap_rvputs(r, "<a href='", ap_escape_uri(r->pool, r->uri), "?b=",
balancer->s->name + sizeof(BALANCER_PREFIX) - 1,
- "&nonce=", balancer->s->nonce,
- "\">", NULL);
+ "&amp;nonce=", balancer->s->nonce,
+ "'>", NULL);
ap_rvputs(r, balancer->s->name, "</a> [",balancer->s->sname, "]</h3>\n", NULL);
ap_rputs("\n\n<table><tr>"
"<th>MaxMembers</th><th>StickySession</th><th>DisableFailover</th><th>Timeout</th><th>FailoverAttempts</th><th>Method</th>"
@@ -1501,9 +1553,9 @@ static int balancer_handler(request_rec *r)
else {
ap_rputs("<td> (None) ", r);
}
- ap_rprintf(r, "<td>%s</td>\n",
+ ap_rprintf(r, "</td><td>%s</td>\n",
balancer->s->sticky_force ? "On" : "Off");
- ap_rprintf(r, "</td><td>%" APR_TIME_T_FMT "</td>",
+ ap_rprintf(r, "<td>%" APR_TIME_T_FMT "</td>",
apr_time_sec(balancer->s->timeout));
ap_rprintf(r, "<td>%d</td>\n", balancer->s->max_attempts);
ap_rprintf(r, "<td>%s</td>\n",
@@ -1520,19 +1572,22 @@ static int balancer_handler(request_rec *r)
"<th>Worker URL</th>"
"<th>Route</th><th>RouteRedir</th>"
"<th>Factor</th><th>Set</th><th>Status</th>"
- "<th>Elected</th><th>Busy</th><th>Load</th><th>To</th><th>From</th>"
- "</tr>\n", r);
+ "<th>Elected</th><th>Busy</th><th>Load</th><th>To</th><th>From</th>", r);
+ if (set_worker_hc_param_f) {
+ ap_rputs("<th>HC Method</th><th>HC Interval</th><th>Passes</th><th>Fails</th><th>HC uri</th><th>HC Expr</th>", r);
+ }
+ ap_rputs("</tr>\n", r);
workers = (proxy_worker **)balancer->workers->elts;
for (n = 0; n < balancer->workers->nelts; n++) {
char fbuf[50];
worker = *workers;
- ap_rvputs(r, "<tr>\n<td><a href=\"",
+ ap_rvputs(r, "<tr>\n<td><a href='",
ap_escape_uri(r->pool, r->uri), "?b=",
- balancer->s->name + sizeof(BALANCER_PREFIX) - 1, "&w=",
+ balancer->s->name + sizeof(BALANCER_PREFIX) - 1, "&amp;w=",
ap_escape_uri(r->pool, worker->s->name),
- "&nonce=", balancer->s->nonce,
- "\">", NULL);
+ "&amp;nonce=", balancer->s->nonce,
+ "'>", NULL);
ap_rvputs(r, (*worker->s->uds_path ? "<i>" : ""), ap_proxy_worker_name(r->pool, worker),
(*worker->s->uds_path ? "</i>" : ""), "</a></td>", NULL);
ap_rvputs(r, "<td>", ap_escape_html(r->pool, worker->s->route),
@@ -1549,6 +1604,14 @@ static int balancer_handler(request_rec *r)
ap_rputs(apr_strfsize(worker->s->transferred, fbuf), r);
ap_rputs("</td><td>", r);
ap_rputs(apr_strfsize(worker->s->read, fbuf), r);
+ if (set_worker_hc_param_f) {
+ ap_rprintf(r, "</td><td>%s</td>", ap_proxy_show_hcmethod(worker->s->method));
+ ap_rprintf(r, "<td>%d</td>", (int)apr_time_sec(worker->s->interval));
+ ap_rprintf(r, "<td>%d (%d)</td>", worker->s->passes, worker->s->pcount);
+ ap_rprintf(r, "<td>%d (%d)</td>", worker->s->fails, worker->s->fcount);
+ ap_rprintf(r, "<td>%s</td>", worker->s->hcuri);
+ ap_rprintf(r, "<td>%s", worker->s->hcexpr);
+ }
ap_rputs("</td></tr>\n", r);
++workers;
@@ -1557,35 +1620,72 @@ static int balancer_handler(request_rec *r)
++balancer;
}
ap_rputs("<hr />\n", r);
+ if (hc_show_exprs_f) {
+ hc_show_exprs_f(r);
+ }
if (wsel && bsel) {
ap_rputs("<h3>Edit worker settings for ", r);
ap_rvputs(r, (*wsel->s->uds_path?"<i>":""), ap_proxy_worker_name(r->pool, wsel), (*wsel->s->uds_path?"</i>":""), "</h3>\n", NULL);
- ap_rputs("<form method=\"POST\" enctype=\"application/x-www-form-urlencoded\" action=\"", r);
- ap_rvputs(r, ap_escape_uri(r->pool, action), "\">\n", NULL);
- ap_rputs("<dl>\n<table><tr><td>Load factor:</td><td><input name='w_lf' id='w_lf' type=text ", r);
+ ap_rputs("<form method='POST' enctype='application/x-www-form-urlencoded' action='", r);
+ ap_rvputs(r, ap_escape_uri(r->pool, action), "'>\n", NULL);
+ ap_rputs("<table><tr><td>Load factor:</td><td><input name='w_lf' id='w_lf' type=text ", r);
ap_rprintf(r, "value='%d'></td></tr>\n", wsel->s->lbfactor);
ap_rputs("<tr><td>LB Set:</td><td><input name='w_ls' id='w_ls' type=text ", r);
ap_rprintf(r, "value='%d'></td></tr>\n", wsel->s->lbset);
ap_rputs("<tr><td>Route:</td><td><input name='w_wr' id='w_wr' type=text ", r);
- ap_rvputs(r, "value=\"", ap_escape_html(r->pool, wsel->s->route),
+ ap_rvputs(r, "value='", ap_escape_html(r->pool, wsel->s->route),
NULL);
- ap_rputs("\"></td></tr>\n", r);
+ ap_rputs("'></td></tr>\n", r);
ap_rputs("<tr><td>Route Redirect:</td><td><input name='w_rr' id='w_rr' type=text ", r);
- ap_rvputs(r, "value=\"", ap_escape_html(r->pool, wsel->s->redirect),
+ ap_rvputs(r, "value='", ap_escape_html(r->pool, wsel->s->redirect),
NULL);
- ap_rputs("\"></td></tr>\n", r);
+ ap_rputs("'></td></tr>\n", r);
ap_rputs("<tr><td>Status:</td>", r);
ap_rputs("<td><table><tr>"
"<th>Ignore Errors</th>"
"<th>Draining Mode</th>"
"<th>Disabled</th>"
- "<th>Hot Standby</th></tr>\n<tr>", r);
- create_radio("w_status_I", (PROXY_WORKER_IGNORE_ERRORS & wsel->s->status), r);
- create_radio("w_status_N", (PROXY_WORKER_DRAIN & wsel->s->status), r);
- create_radio("w_status_D", (PROXY_WORKER_DISABLED & wsel->s->status), r);
- create_radio("w_status_H", (PROXY_WORKER_HOT_STANDBY & wsel->s->status), r);
- ap_rputs("</tr></table>\n", r);
- ap_rputs("<tr><td colspan=2><input type=submit value='Submit'></td></tr>\n", r);
+ "<th>Hot Standby</th>", r);
+ if (hc_show_exprs_f) {
+ ap_rputs("<th>HC Fail</th>", r);
+ }
+ ap_rputs("<th>Stopped</th></tr>\n<tr>", r);
+ create_radio("w_status_I", (PROXY_WORKER_IS(wsel, PROXY_WORKER_IGNORE_ERRORS)), r);
+ create_radio("w_status_N", (PROXY_WORKER_IS(wsel, PROXY_WORKER_DRAIN)), r);
+ create_radio("w_status_D", (PROXY_WORKER_IS(wsel, PROXY_WORKER_DISABLED)), r);
+ create_radio("w_status_H", (PROXY_WORKER_IS(wsel, PROXY_WORKER_HOT_STANDBY)), r);
+ if (hc_show_exprs_f) {
+ create_radio("w_status_C", (PROXY_WORKER_IS(wsel, PROXY_WORKER_HC_FAIL)), r);
+ }
+ create_radio("w_status_S", (PROXY_WORKER_IS(wsel, PROXY_WORKER_STOPPED)), r);
+ ap_rputs("</tr></table></td></tr>\n", r);
+ if (hc_select_exprs_f) {
+ proxy_hcmethods_t *method = proxy_hcmethods;
+ ap_rputs("<tr><td colspan='2'>\n<table align='center'><tr><th>Health Check param</th><th>Value</th></tr>\n", r);
+ ap_rputs("<tr><td>Method</td><td><select name='w_hm'>\n", r);
+ for (; method->name; method++) {
+ if (method->implemented) {
+ ap_rprintf(r, "<option value='%s' %s >%s</option>\n",
+ method->name,
+ (wsel->s->method == method->method) ? "selected" : "",
+ method->name);
+ }
+ }
+ ap_rputs("</select>\n</td></tr>\n", r);
+ ap_rputs("<tr><td>Expr</td><td><select name='w_he'>\n", r);
+ hc_select_exprs_f(r, wsel->s->hcexpr);
+ ap_rputs("</select>\n</td></tr>\n", r);
+ ap_rprintf(r, "<tr><td>Interval (secs)</td><td><input name='w_hi' id='w_hi' type='text' "
+ "value='%d'></td></tr>\n", (int)apr_time_sec(wsel->s->interval));
+ ap_rprintf(r, "<tr><td>Passes trigger</td><td><input name='w_hp' id='w_hp' type='text' "
+ "value='%d'></td></tr>\n", wsel->s->passes);
+ ap_rprintf(r, "<tr><td>Fails trigger</td><td><input name='w_hf' id='w_hf' type='text' "
+ "value='%d'></td></tr>\n", wsel->s->fails);
+ ap_rprintf(r, "<tr><td>HC uri</td><td><input name='w_hu' id='w_hu' type='text' "
+ "value='%s'></td></tr>\n", ap_escape_html(r->pool, wsel->s->hcuri));
+ ap_rputs("</table>\n</td></tr>\n", r);
+ }
+ ap_rputs("<tr><td colspan='2'><input type=submit value='Submit'></td></tr>\n", r);
ap_rvputs(r, "</table>\n<input type=hidden name='w' id='w' ", NULL);
ap_rvputs(r, "value='", ap_escape_uri(r->pool, wsel->s->name), "'>\n", NULL);
ap_rvputs(r, "<input type=hidden name='b' id='b' ", NULL);
@@ -1603,7 +1703,7 @@ static int balancer_handler(request_rec *r)
ap_rvputs(r, bsel->s->name, "</h3>\n", NULL);
ap_rputs("<form method='POST' enctype='application/x-www-form-urlencoded' action='", r);
ap_rvputs(r, ap_escape_uri(r->pool, action), "'>\n", NULL);
- ap_rputs("<dl>\n<table>\n", r);
+ ap_rputs("<table>\n", r);
provs = ap_list_provider_names(r->pool, PROXY_LBMETHOD, "0");
if (provs) {
ap_rputs("<tr><td>LBmethod:</td>", r);
diff --git a/modules/proxy/mod_proxy_balancer.mak b/modules/proxy/mod_proxy_balancer.mak
index 02561661..86d7ec5e 100644
--- a/modules/proxy/mod_proxy_balancer.mak
+++ b/modules/proxy/mod_proxy_balancer.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_balancer.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_balancer.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_balancer.so" /d LONG_NAME="proxy_balancer_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_balancer.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_balancer.so" /d LONG_NAME="proxy_balancer_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_balancer - Win32 Debug"
"$(INTDIR)\mod_proxy_balancer.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_balancer.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_balancer.so" /d LONG_NAME="proxy_balancer_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_balancer.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_balancer.so" /d LONG_NAME="proxy_balancer_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_connect.mak b/modules/proxy/mod_proxy_connect.mak
index 40d0069b..be354dbf 100644
--- a/modules/proxy/mod_proxy_connect.mak
+++ b/modules/proxy/mod_proxy_connect.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_connect.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_connect.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_connect.so" /d LONG_NAME="proxy_connect_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_connect.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_connect.so" /d LONG_NAME="proxy_connect_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_connect - Win32 Debug"
"$(INTDIR)\mod_proxy_connect.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_connect.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_connect.so" /d LONG_NAME="proxy_connect_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_connect.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_connect.so" /d LONG_NAME="proxy_connect_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_express.mak b/modules/proxy/mod_proxy_express.mak
index 8bd8f29f..f656d226 100644
--- a/modules/proxy/mod_proxy_express.mak
+++ b/modules/proxy/mod_proxy_express.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_express.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_express.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_express.so" /d LONG_NAME="proxy_balancer_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_express.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_express.so" /d LONG_NAME="proxy_balancer_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_express - Win32 Debug"
"$(INTDIR)\mod_proxy_express.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_express.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_express.so" /d LONG_NAME="proxy_balancer_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_express.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_express.so" /d LONG_NAME="proxy_balancer_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c
index 90c63c3b..19047ff6 100644
--- a/modules/proxy/mod_proxy_fcgi.c
+++ b/modules/proxy/mod_proxy_fcgi.c
@@ -262,11 +262,21 @@ static apr_status_t send_environment(proxy_conn_rec *conn, request_rec *r,
}
}
- /* Strip balancer prefix */
- if (r->filename && !strncmp(r->filename, "proxy:balancer://", 17)) {
- char *newfname = apr_pstrdup(r->pool, r->filename+17);
- newfname = ap_strchr(newfname, '/');
- r->filename = newfname;
+ /* Strip proxy: prefixes */
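+ /* e.g. (illustrative): "proxy:fcgi://127.0.0.1:9000/var/www/app.php"
+ * ends up as "/var/www/app.php" after the prefix strip below */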
+ if (r->filename) {
+ char *newfname = NULL;
+
+ if (!strncmp(r->filename, "proxy:balancer://", 17)) {
+ newfname = apr_pstrdup(r->pool, r->filename+17);
+ }
+ else if (!strncmp(r->filename, "proxy:fcgi://", 13)) {
+ newfname = apr_pstrdup(r->pool, r->filename+13);
+ }
+
+ if (newfname) {
+ newfname = ap_strchr(newfname, '/');
+ r->filename = newfname;
+ }
}
ap_add_common_vars(r);
@@ -876,17 +886,17 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
char server_portstr[32];
conn_rec *origin = NULL;
proxy_conn_rec *backend = NULL;
+ apr_uri_t *uri;
proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
&proxy_module);
apr_pool_t *p = r->pool;
- apr_uri_t *uri = apr_palloc(r->pool, sizeof(*uri));
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01076)
"url: %s proxyname: %s proxyport: %d",
- url, proxyname, proxyport);
+ url, proxyname, proxyport);
if (strncasecmp(url, "fcgi:", 5) != 0) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01077) "declining URL %s", url);
@@ -909,6 +919,7 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
backend->is_ssl = 0;
/* Step One: Determine Who To Connect To */
+ uri = apr_palloc(p, sizeof(*uri));
status = ap_proxy_determine_connection(p, r, conf, worker, backend,
uri, &url, proxyname, proxyport,
server_portstr,
diff --git a/modules/proxy/mod_proxy_fcgi.mak b/modules/proxy/mod_proxy_fcgi.mak
index d21bc019..4b150889 100644
--- a/modules/proxy/mod_proxy_fcgi.mak
+++ b/modules/proxy/mod_proxy_fcgi.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_fcgi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_fcgi.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_fcgi.so" /d LONG_NAME="proxy_fcgi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_fcgi.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_fcgi.so" /d LONG_NAME="proxy_fcgi_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_fcgi - Win32 Debug"
"$(INTDIR)\mod_proxy_fcgi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_fcgi.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_fcgi.so" /d LONG_NAME="proxy_fcgi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_fcgi.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_fcgi.so" /d LONG_NAME="proxy_fcgi_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_ftp.mak b/modules/proxy/mod_proxy_ftp.mak
index 323e8071..0b1ca30d 100644
--- a/modules/proxy/mod_proxy_ftp.mak
+++ b/modules/proxy/mod_proxy_ftp.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_ftp.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_ftp.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_ftp.so" /d LONG_NAME="proxy_ftp_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_ftp.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_ftp.so" /d LONG_NAME="proxy_ftp_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_ftp - Win32 Debug"
"$(INTDIR)\mod_proxy_ftp.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_ftp.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_ftp.so" /d LONG_NAME="proxy_ftp_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_ftp.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_ftp.so" /d LONG_NAME="proxy_ftp_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_hcheck.c b/modules/proxy/mod_proxy_hcheck.c
new file mode 100644
index 00000000..6606652e
--- /dev/null
+++ b/modules/proxy/mod_proxy_hcheck.c
@@ -0,0 +1,1175 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "mod_proxy.h"
+#include "mod_watchdog.h"
+#include "ap_slotmem.h"
+#include "ap_expr.h"
+#if APR_HAS_THREADS
+#include "apr_thread_pool.h"
+#endif
+
+module AP_MODULE_DECLARE_DATA proxy_hcheck_module;
+
+#define HCHECK_WATHCHDOG_NAME ("_proxy_hcheck_")
+#define HC_THREADPOOL_SIZE (16)
+
+/* Why? So we can easily set/clear HC_USE_THREADS during dev testing */
+#if APR_HAS_THREADS
+#define HC_USE_THREADS 1
+#else
+#define HC_USE_THREADS 0
+typedef void apr_thread_pool_t;
+#endif
+
+typedef struct {
+ char *name;
+ hcmethod_t method;
+ int passes;
+ int fails;
+ apr_interval_time_t interval;
+ char *hurl;
+ char *hcexpr;
+} hc_template_t;
+
+typedef struct {
+ char *expr;
+ ap_expr_info_t *pexpr; /* parsed expression */
+} hc_condition_t;
+
+typedef struct {
+ apr_pool_t *p;
+ apr_bucket_alloc_t *ba;
+ apr_array_header_t *templates;
+ apr_table_t *conditions;
+ ap_watchdog_t *watchdog;
+ apr_hash_t *hcworkers;
+ apr_thread_pool_t *hctp;
+ int tpsize;
+ server_rec *s;
+} sctx_t;
+
+/* Used in the HC worker via the context field */
+typedef struct {
+ char *path; /* The path of the original worker URL */
+ char *req; /* pre-formatted HTTP/AJP request */
+ proxy_worker *w; /* Pointer to the actual worker */
+} wctx_t;
+
+typedef struct {
+ apr_pool_t *ptemp;
+ sctx_t *ctx;
+ proxy_worker *worker;
+ apr_time_t now;
+} baton_t;
+
+static void *hc_create_config(apr_pool_t *p, server_rec *s)
+{
+ sctx_t *ctx = (sctx_t *) apr_palloc(p, sizeof(sctx_t));
+ apr_pool_create(&ctx->p, p);
+ ctx->ba = apr_bucket_alloc_create(p);
+ ctx->templates = apr_array_make(p, 10, sizeof(hc_template_t));
+ ctx->conditions = apr_table_make(p, 10);
+ ctx->hcworkers = apr_hash_make(p);
+ ctx->tpsize = HC_THREADPOOL_SIZE;
+ ctx->s = s;
+
+ return ctx;
+}
+
+/*
+ * This serves double duty by not only validating (and creating)
+ * the health-check template, but also ties into set_worker_param()
+ * which does the actual setting of worker params in shm.
+ */
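+/* Illustrative configuration (the exact directive syntax is an assumption,
+ * not taken verbatim from this patch; ProxyHCTemplate is the template
+ * directive handled by set_hc_template() below):
+ *   ProxyHCTemplate lively hcmethod=GET hcinterval=10 hcuri=/status
+ *   ProxyPass "/app" "http://127.0.0.1:8080/app" hctemplate=lively
+ * Keys accepted here: hctemplate, hcmethod, hcinterval, hcpasses,
+ * hcfails, hcuri and hcexpr.
+ */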
+static const char *set_worker_hc_param(apr_pool_t *p,
+ server_rec *s,
+ proxy_worker *worker,
+ const char *key,
+ const char *val,
+ void *v)
+{
+ int ival;
+ hc_template_t *temp;
+ sctx_t *ctx = (sctx_t *) ap_get_module_config(s->module_config,
+ &proxy_hcheck_module);
+ if (!worker && !v) {
+ return "Bad call to set_worker_hc_param()";
+ }
+ temp = (hc_template_t *)v;
+ if (!strcasecmp(key, "hctemplate")) {
+ hc_template_t *template;
+ template = (hc_template_t *)ctx->templates->elts;
+ for (ival = 0; ival < ctx->templates->nelts; ival++, template++) {
+ if (!strcasecmp(template->name, val)) {
+ if (worker) {
+ worker->s->method = template->method;
+ worker->s->interval = template->interval;
+ worker->s->passes = template->passes;
+ worker->s->fails = template->fails;
+ PROXY_STRNCPY(worker->s->hcuri, template->hurl);
+ PROXY_STRNCPY(worker->s->hcexpr, template->hcexpr);
+ } else {
+ temp->method = template->method;
+ temp->interval = template->interval;
+ temp->passes = template->passes;
+ temp->fails = template->fails;
+ temp->hurl = apr_pstrdup(p, template->hurl);
+ temp->hcexpr = apr_pstrdup(p, template->hcexpr);
+ }
+ return NULL;
+ }
+ }
+ return apr_psprintf(p, "Unknown ProxyHCTemplate name: %s", val);
+ }
+ else if (!strcasecmp(key, "hcmethod")) {
+ proxy_hcmethods_t *method = proxy_hcmethods;
+ for (; method->name; method++) {
+ if (!strcasecmp(val, method->name)) {
+ if (!method->implemented) {
+ return apr_psprintf(p, "Health check method %s not (yet) implemented",
+ val);
+ }
+ if (worker) {
+ worker->s->method = method->method;
+ } else {
+ temp->method = method->method;
+ }
+ return NULL;
+ }
+ }
+ return "Unknown method";
+ }
+ else if (!strcasecmp(key, "hcinterval")) {
+ ival = atoi(val);
+ if (ival < HCHECK_WATHCHDOG_INTERVAL)
+ return apr_psprintf(p, "Interval must be at least %d seconds",
+ HCHECK_WATHCHDOG_INTERVAL);
+ if (worker) {
+ worker->s->interval = apr_time_from_sec(ival);
+ } else {
+ temp->interval = apr_time_from_sec(ival);
+ }
+ }
+ else if (!strcasecmp(key, "hcpasses")) {
+ ival = atoi(val);
+ if (ival < 0)
+ return "Passes must be a positive value";
+ if (worker) {
+ worker->s->passes = ival;
+ } else {
+ temp->passes = ival;
+ }
+ }
+ else if (!strcasecmp(key, "hcfails")) {
+ ival = atoi(val);
+ if (ival < 0)
+ return "Fails must be a positive value";
+ if (worker) {
+ worker->s->fails = ival;
+ } else {
+ temp->fails = ival;
+ }
+ }
+ else if (!strcasecmp(key, "hcuri")) {
+ if (strlen(val) >= sizeof(worker->s->hcuri))
+ return apr_psprintf(p, "Health check uri length must be < %d characters",
+ (int)sizeof(worker->s->hcuri));
+ if (worker) {
+ PROXY_STRNCPY(worker->s->hcuri, val);
+ } else {
+ temp->hurl = apr_pstrdup(p, val);
+ }
+ }
+ else if (!strcasecmp(key, "hcexpr")) {
+ hc_condition_t *cond;
+ cond = (hc_condition_t *)apr_table_get(ctx->conditions, val);
+ if (!cond) {
+ return apr_psprintf(p, "Unknown health check condition expr: %s", val);
+ }
+ /* This check is wonky... a known expr can't be this big. Check anyway */
+ if (strlen(val) >= sizeof(worker->s->hcexpr))
+ return apr_psprintf(p, "Health check expression name length must be < %d characters",
+ (int)sizeof(worker->s->hcexpr));
+ if (worker) {
+ PROXY_STRNCPY(worker->s->hcexpr, val);
+ } else {
+ temp->hcexpr = apr_pstrdup(p, val);
+ }
+ }
+ else {
+ return "unknown Worker hcheck parameter";
+ }
+ return NULL;
+}
+
+static const char *set_hc_condition(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ char *name = NULL;
+ char *expr;
+ sctx_t *ctx;
+ hc_condition_t *cond;
+
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_HTACCESS);
+ if (err)
+ return err;
+ ctx = (sctx_t *) ap_get_module_config(cmd->server->module_config,
+ &proxy_hcheck_module);
+
+ name = ap_getword_conf(cmd->pool, &arg);
+ if (!*name) {
+ return apr_pstrcat(cmd->temp_pool, "Missing expression name for ",
+ cmd->cmd->name, NULL);
+ }
+ if (strlen(name) > (PROXY_WORKER_MAX_SCHEME_SIZE - 1)) {
+ return apr_psprintf(cmd->temp_pool, "Expression name limited to %d characters",
+ (PROXY_WORKER_MAX_SCHEME_SIZE - 1));
+ }
+ /* get expr. Allow fancy new {...} quoting style */
+ expr = ap_getword_conf2(cmd->temp_pool, &arg);
+ if (!*expr) {
+ return apr_pstrcat(cmd->temp_pool, "Missing expression for ",
+ cmd->cmd->name, NULL);
+ }
+ cond = apr_palloc(cmd->pool, sizeof(hc_condition_t));
+ cond->pexpr = ap_expr_parse_cmd(cmd, expr, 0, &err, NULL);
+ if (err) {
+ return apr_psprintf(cmd->temp_pool, "Could not parse expression \"%s\": %s",
+ expr, err);
+ }
+ cond->expr = apr_pstrdup(cmd->pool, expr);
+ apr_table_setn(ctx->conditions, name, (void *)cond);
+ expr = ap_getword_conf(cmd->temp_pool, &arg);
+ if (*expr) {
+ return "error: extra parameter(s)";
+ }
+
+ return NULL;
+}
+
+static const char *set_hc_template(cmd_parms *cmd, void *dummy, const char *arg)
+{
+ char *name = NULL;
+ char *word, *val;
+ hc_template_t *template;
+ sctx_t *ctx;
+
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_HTACCESS);
+ if (err)
+ return err;
+ ctx = (sctx_t *) ap_get_module_config(cmd->server->module_config,
+ &proxy_hcheck_module);
+
+ name = ap_getword_conf(cmd->temp_pool, &arg);
+ if (!*name) {
+ return apr_pstrcat(cmd->temp_pool, "Missing template name for ",
+ cmd->cmd->name, NULL);
+ }
+
+ template = (hc_template_t *)apr_array_push(ctx->templates);
+
+ template->name = apr_pstrdup(cmd->pool, name);
+ template->method = template->passes = template->fails = 1;
+ template->interval = apr_time_from_sec(HCHECK_WATHCHDOG_DEFAULT_INTERVAL);
+ template->hurl = NULL;
+ template->hcexpr = NULL;
+ while (*arg) {
+ word = ap_getword_conf(cmd->pool, &arg);
+ val = strchr(word, '=');
+ if (!val) {
+ return "Invalid ProxyHCTemplate parameter. Parameter must be "
+ "in the form 'key=value'";
+ }
+ else
+ *val++ = '\0';
+ err = set_worker_hc_param(cmd->pool, ctx->s, NULL, word, val, template);
+
+ if (err) {
+ /* get rid of recently pushed (bad) template */
+ apr_array_pop(ctx->templates);
+ return apr_pstrcat(cmd->temp_pool, "ProxyHCTemplate: ", err, " ", word, "=", val, "; ", name, NULL);
+ }
+ /* No error means we have a valid template */
+ }
+
+ return NULL;
+}
+
+#if HC_USE_THREADS
+static const char *set_hc_tpsize (cmd_parms *cmd, void *dummy, const char *arg)
+{
+ sctx_t *ctx;
+
+ const char *err = ap_check_cmd_context(cmd, NOT_IN_HTACCESS);
+ if (err)
+ return err;
+ ctx = (sctx_t *) ap_get_module_config(cmd->server->module_config,
+ &proxy_hcheck_module);
+
+ ctx->tpsize = atoi(arg);
+ if (ctx->tpsize < 0)
+ return "Invalid ProxyHCTPsize parameter. Parameter must be "
+ ">= 0";
+ return NULL;
+}
+#endif
+
+/*
+ * Create a dummy request rec, simply so we can use ap_expr.
+ * Use our short-lived pool for bucket_alloc
+ */
+static request_rec *create_request_rec(apr_pool_t *p1, conn_rec *conn, const char *method)
+{
+ request_rec *r;
+ apr_pool_t *p;
+ apr_bucket_alloc_t *ba;
+ apr_pool_create(&p, p1);
+ apr_pool_tag(p, "request");
+ r = apr_pcalloc(p, sizeof(request_rec));
+ ba = apr_bucket_alloc_create(p);
+ r->pool = p;
+ r->connection = conn;
+ r->connection->bucket_alloc = ba;
+ r->server = conn->base_server;
+
+ r->user = NULL;
+ r->ap_auth_type = NULL;
+
+ r->allowed_methods = ap_make_method_list(p, 2);
+
+ r->headers_in = apr_table_make(r->pool, 25);
+ r->trailers_in = apr_table_make(r->pool, 5);
+ r->subprocess_env = apr_table_make(r->pool, 25);
+ r->headers_out = apr_table_make(r->pool, 12);
+ r->err_headers_out = apr_table_make(r->pool, 5);
+ r->trailers_out = apr_table_make(r->pool, 5);
+ r->notes = apr_table_make(r->pool, 5);
+
+ r->kept_body = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ r->request_config = ap_create_request_config(r->pool);
+ /* Must be set before we run create request hook */
+
+ r->proto_output_filters = conn->output_filters;
+ r->output_filters = r->proto_output_filters;
+ r->proto_input_filters = conn->input_filters;
+ r->input_filters = r->proto_input_filters;
+ r->per_dir_config = r->server->lookup_defaults;
+
+ r->sent_bodyct = 0; /* bytect isn't for body */
+
+ r->read_length = 0;
+ r->read_body = REQUEST_NO_BODY;
+
+ r->status = HTTP_OK; /* Until further notice */
+ r->header_only = 1;
+ r->the_request = NULL;
+
+ /* Begin by presuming any module can make its own path_info assumptions,
+ * until some module interjects and changes the value.
+ */
+ r->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
+
+ r->useragent_addr = conn->client_addr;
+ r->useragent_ip = conn->client_ip;
+
+
+ /* Time to populate r with the data we have. */
+ r->method = method;
+ /* Provide quick information about the request method as soon as known */
+ r->method_number = ap_method_number_of(r->method);
+ if (r->method_number == M_GET && r->method[0] == 'G') {
+ r->header_only = 0;
+ }
+
+ r->protocol = "HTTP/1.0";
+ r->proto_num = HTTP_VERSION(1, 0);
+
+ r->hostname = NULL;
+
+ return r;
+}
+
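+/* Return the private "shadow" worker used to health-check the given real
+ * worker, creating it and caching it in ctx->hcworkers (keyed by the real
+ * worker's pointer address) on first use.
+ */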
+static proxy_worker *hc_get_hcworker(sctx_t *ctx, proxy_worker *worker,
+ apr_pool_t *p)
+{
+ proxy_worker *hc = NULL;
+ const char* wptr;
+ apr_port_t port;
+
+ wptr = apr_psprintf(ctx->p, "%pp", worker);
+ hc = (proxy_worker *)apr_hash_get(ctx->hcworkers, wptr, APR_HASH_KEY_STRING);
+ port = (worker->s->port ? worker->s->port : ap_proxy_port_of_scheme(worker->s->scheme));
+ if (!hc) {
+ apr_uri_t uri;
+ apr_status_t rv;
+ const char *url = worker->s->name;
+ wctx_t *wctx = apr_pcalloc(ctx->p, sizeof(wctx_t));
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ctx->s, APLOGNO(03248)
+ "Creating hc worker %s for %s://%s:%d",
+ wptr, worker->s->scheme, worker->s->hostname,
+ (int)port);
+
+ ap_proxy_define_worker(ctx->p, &hc, NULL, NULL, worker->s->name, 0);
+ PROXY_STRNCPY(hc->s->name, wptr);
+ PROXY_STRNCPY(hc->s->hostname, worker->s->hostname);
+ PROXY_STRNCPY(hc->s->scheme, worker->s->scheme);
+ hc->hash.def = hc->s->hash.def = ap_proxy_hashfunc(hc->s->name, PROXY_HASHFUNC_DEFAULT);
+ hc->hash.fnv = hc->s->hash.fnv = ap_proxy_hashfunc(hc->s->name, PROXY_HASHFUNC_FNV);
+ hc->s->port = port;
+ /* Do not disable worker in case of errors */
+ hc->s->status |= PROXY_WORKER_IGNORE_ERRORS;
+ /* Mark as the "generic" worker */
+ hc->s->status |= PROXY_WORKER_GENERIC;
+ ap_proxy_initialize_worker(hc, ctx->s, ctx->p);
+ hc->s->is_address_reusable = worker->s->is_address_reusable;
+ hc->s->disablereuse = worker->s->disablereuse;
+ hc->s->method = worker->s->method;
+ rv = apr_uri_parse(p, url, &uri);
+ if (rv == APR_SUCCESS) {
+ wctx->path = apr_pstrdup(ctx->p, uri.path);
+ }
+ wctx->w = worker;
+ hc->context = wctx;
+ apr_hash_set(ctx->hcworkers, wptr, APR_HASH_KEY_STRING, hc);
+ }
+ /* This *could* have changed via the Balancer Manager */
+ /* TODO */
+ if (hc->s->method != worker->s->method) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ctx->s, APLOGNO(03311)
+ "Updating hc worker %s for %s://%s:%d",
+ wptr, worker->s->scheme, worker->s->hostname,
+ (int)port);
+ hc->s->method = worker->s->method;
+ apr_hash_set(ctx->hcworkers, wptr, APR_HASH_KEY_STRING, hc);
+ }
+ return hc;
+}
+
+static int hc_determine_connection(sctx_t *ctx, proxy_worker *worker) {
+ apr_status_t rv = APR_SUCCESS;
+ int will_reuse = worker->s->is_address_reusable && !worker->s->disablereuse;
+ /*
+ * normally, this is done in ap_proxy_determine_connection().
+ * TODO: Look at using ap_proxy_determine_connection() with a
+ * fake request_rec
+ */
+ if (!worker->cp->addr || !will_reuse) {
+ rv = apr_sockaddr_info_get(&(worker->cp->addr), worker->s->hostname, APR_UNSPEC,
+ worker->s->port, 0, ctx->p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ctx->s, APLOGNO(03249)
+ "DNS lookup failure for: %s:%d",
+ worker->s->hostname, (int)worker->s->port);
+ }
+ }
+ return (rv == APR_SUCCESS ? OK : !OK);
+}
+
+static apr_status_t hc_init_worker(sctx_t *ctx, proxy_worker *worker) {
+ apr_status_t rv = APR_SUCCESS;
+ /*
+ * Since this is the watchdog, workers never actually handle a
+ * request here, and so the local data isn't initialized (of
+ * course, the shared memory is). So we need to bootstrap
+ * worker->cp. Note, we only need do this once.
+ */
+ if (!worker->cp) {
+ rv = ap_proxy_initialize_worker(worker, ctx->s, ctx->p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ctx->s, APLOGNO(03250) "Cannot init worker");
+ return rv;
+ }
+ rv = (hc_determine_connection(ctx, worker) == OK ? APR_SUCCESS : APR_EGENERAL);
+ }
+ return rv;
+}
+
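+/* Release the backend connection (forcing it closed), log the check
+ * result, and map the OK/!OK status onto an apr_status_t.
+ */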
+static apr_status_t backend_cleanup(const char *proxy_function, proxy_conn_rec *backend,
+ server_rec *s, int status)
+{
+ if (backend) {
+ backend->close = 1;
+ ap_proxy_release_connection(proxy_function, backend, s);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03251)
+ "Health check %s Status (%d) for %s.",
+ ap_proxy_show_hcmethod(backend->worker->s->method),
+ status,
+ backend->worker->s->name);
+ }
+ if (status != OK) {
+ return APR_EGENERAL;
+ }
+ return APR_SUCCESS;
+}
+
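+/* Acquire a connection record for the hc worker, enable SSL when the
+ * scheme is https, and (re)resolve the backend address.
+ */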
+static int hc_get_backend(const char *proxy_function, proxy_conn_rec **backend,
+ proxy_worker *hc, sctx_t *ctx)
+{
+ int status;
+ status = ap_proxy_acquire_connection(proxy_function, backend, hc, ctx->s);
+ if (status == OK) {
+ (*backend)->addr = hc->cp->addr;
+ (*backend)->pool = ctx->p;
+ (*backend)->hostname = hc->s->hostname;
+ if (strcmp(hc->s->scheme, "https") == 0) {
+ if (!ap_proxy_ssl_enable(NULL)) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ctx->s, APLOGNO(03252)
+ "mod_ssl not configured?");
+ return !OK;
+ }
+ (*backend)->is_ssl = 1;
+ }
+
+ }
+ status = hc_determine_connection(ctx, hc);
+ if (status == OK) {
+ (*backend)->addr = hc->cp->addr;
+ }
+ return status;
+}
+
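+/* TCP health check: simply try to establish a connection to the backend;
+ * ap_proxy_connect_backend() reports whether that worked.
+ */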
+static apr_status_t hc_check_tcp(sctx_t *ctx, apr_pool_t *ptemp, proxy_worker *worker)
+{
+ int status;
+ proxy_conn_rec *backend = NULL;
+ proxy_worker *hc;
+
+ hc = hc_get_hcworker(ctx, worker, ptemp);
+
+ status = hc_get_backend("HCTCP", &backend, hc, ctx);
+ if (status == OK) {
+ backend->addr = hc->cp->addr;
+ status = ap_proxy_connect_backend("HCTCP", backend, hc, ctx->s);
+ /* does an unconditional ap_proxy_is_socket_connected() */
+ }
+ return backend_cleanup("HCTCP", backend, ctx->s, status);
+}
+
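+/* Push the pre-formatted request string down the backend connection's
+ * output filter chain, followed by a flush bucket.
+ */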
+static void hc_send(sctx_t *ctx, apr_pool_t *ptemp, const char *out, proxy_conn_rec *backend)
+{
+ apr_bucket_brigade *tmp_bb = apr_brigade_create(ptemp, ctx->ba);
+ ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, ctx->s, "%s", out);
+ APR_BRIGADE_INSERT_TAIL(tmp_bb, apr_bucket_pool_create(out, strlen(out), ptemp,
+ ctx->ba));
+ APR_BRIGADE_INSERT_TAIL(tmp_bb, apr_bucket_flush_create(ctx->ba));
+ ap_pass_brigade(backend->connection->output_filters, tmp_bb);
+ apr_brigade_destroy(tmp_bb);
+}
+
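+/* Parse the status line and response headers of the health check
+ * response into r->status and r->headers_out.
+ */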
+static int hc_read_headers(sctx_t *ctx, request_rec *r)
+{
+ char buffer[HUGE_STRING_LEN];
+ int len;
+
+ len = ap_getline(buffer, sizeof(buffer), r, 1);
+ if (len <= 0) {
+ return !OK;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ctx->s, APLOGNO(03254)
+ "%s", buffer);
+ /* for the below, see ap_proxy_http_process_response() */
+ if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) {
+ int major;
+ char keepchar;
+ int proxy_status = OK;
+ const char *proxy_status_line = NULL;
+
+ major = buffer[5] - '0';
+ if ((major != 1) || (len >= sizeof(buffer)-1)) {
+ return !OK;
+ }
+
+ keepchar = buffer[12];
+ buffer[12] = '\0';
+ proxy_status = atoi(&buffer[9]);
+ if (keepchar != '\0') {
+ buffer[12] = keepchar;
+ } else {
+ buffer[12] = ' ';
+ buffer[13] = '\0';
+ }
+ proxy_status_line = apr_pstrdup(r->pool, &buffer[9]);
+ r->status = proxy_status;
+ r->status_line = proxy_status_line;
+ } else {
+ return !OK;
+ }
+ /* OK, 1st line is OK... scarf in the headers */
+ while ((len = ap_getline(buffer, sizeof(buffer), r, 1)) > 0) {
+ char *value, *end;
+ if (!(value = strchr(buffer, ':'))) {
+ return !OK;
+ }
+ ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, ctx->s, "%s", buffer);
+ *value = '\0';
+ ++value;
+ while (apr_isspace(*value))
+ ++value; /* Skip to start of value */
+ for (end = &value[strlen(value)-1]; end > value && apr_isspace(*end); --end)
+ *end = '\0';
+ apr_table_add(r->headers_out, buffer, value);
+ }
+ return OK;
+}
+
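+/* Consume the response body, copying it into r->kept_body, so the whole
+ * backend response is read before the connection is released.
+ */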
+static int hc_read_body (sctx_t *ctx, request_rec *r)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_bucket_brigade *bb;
+ int seen_eos = 0;
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ do {
+ apr_bucket *bucket, *cpy;
+ apr_size_t len = HUGE_STRING_LEN;
+
+ rv = ap_get_brigade(r->proto_input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, len);
+
+ if (rv != APR_SUCCESS) {
+ if (APR_STATUS_IS_TIMEUP(rv) || APR_STATUS_IS_EOF(rv)) {
+ rv = APR_SUCCESS;
+ break;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, ctx->s, APLOGNO(03300)
+ "Error reading response body");
+ break;
+ }
+
+ for (bucket = APR_BRIGADE_FIRST(bb);
+ bucket != APR_BRIGADE_SENTINEL(bb);
+ bucket = APR_BUCKET_NEXT(bucket))
+ {
+ if (APR_BUCKET_IS_EOS(bucket)) {
+ seen_eos = 1;
+ break;
+ }
+ if (APR_BUCKET_IS_FLUSH(bucket)) {
+ continue;
+ }
+ rv = apr_bucket_copy(bucket, &cpy);
+ if (rv != APR_SUCCESS) {
+ break;
+ }
+ APR_BRIGADE_INSERT_TAIL(r->kept_body, cpy);
+ }
+ apr_brigade_cleanup(bb);
+ }
+ while (!seen_eos);
+ return (rv == APR_SUCCESS ? OK : !OK);
+}
+
+/*
+ * Send the HTTP OPTIONS, HEAD or GET request to the backend
+ * server associated w/ worker. If we have Conditions,
+ * then apply those to the resulting response, otherwise
+ * any status code 2xx or 3xx is considered "passing"
+ */
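+/* An illustrative (assumed, not taken from this patch) condition that an
+ * admin might register via set_hc_condition() and select with hcexpr=:
+ *   ok234  ->  {%{REQUEST_STATUS} =~ /^[234]/}
+ * i.e. treat any 2xx/3xx/4xx response as healthy.
+ */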
+static apr_status_t hc_check_http(sctx_t *ctx, apr_pool_t *ptemp, proxy_worker *worker)
+{
+ int status;
+ proxy_conn_rec *backend = NULL;
+ proxy_worker *hc;
+ conn_rec c;
+ request_rec *r;
+ wctx_t *wctx;
+ hc_condition_t *cond;
+ const char *method = NULL;
+
+ hc = hc_get_hcworker(ctx, worker, ptemp);
+ wctx = (wctx_t *)hc->context;
+
+ if ((status = hc_get_backend("HCOH", &backend, hc, ctx)) != OK) {
+ return backend_cleanup("HCOH", backend, ctx->s, status);
+ }
+ if ((status = ap_proxy_connect_backend("HCOH", backend, hc, ctx->s)) != OK) {
+ return backend_cleanup("HCOH", backend, ctx->s, status);
+ }
+
+ if (!backend->connection) {
+ if ((status = ap_proxy_connection_create("HCOH", backend, &c, ctx->s)) != OK) {
+ return backend_cleanup("HCOH", backend, ctx->s, status);
+ }
+ }
+ switch (hc->s->method) {
+ case OPTIONS:
+ if (!wctx->req) {
+ wctx->req = apr_psprintf(ctx->p,
+ "OPTIONS * HTTP/1.0\r\nHost: %s:%d\r\n\r\n",
+ hc->s->hostname, (int)hc->s->port);
+ }
+ method = "OPTIONS";
+ break;
+
+ case HEAD:
+ method = "HEAD";
+ /* fallthru */
+ case GET:
+ if (!method) { /* did we fall thru? If not, we are GET */
+ method = "GET";
+ }
+ if (!wctx->req) {
+ wctx->req = apr_psprintf(ctx->p,
+ "%s %s%s%s HTTP/1.0\r\nHost: %s:%d\r\n\r\n",
+ method,
+ (wctx->path ? wctx->path : ""),
+ (wctx->path && *hc->s->hcuri ? "/" : "" ),
+ (*hc->s->hcuri ? hc->s->hcuri : ""),
+ hc->s->hostname, (int)hc->s->port);
+ }
+ break;
+
+ default:
+ return backend_cleanup("HCOH", backend, ctx->s, !OK);
+ break;
+ }
+
+ hc_send(ctx, ptemp, wctx->req, backend);
+
+ r = create_request_rec(ptemp, backend->connection, method);
+ if ((status = hc_read_headers(ctx, r)) != OK) {
+ return backend_cleanup("HCOH", backend, ctx->s, status);
+ }
+ if (hc->s->method == GET) {
+ if ((status = hc_read_body(ctx, r)) != OK) {
+ return backend_cleanup("HCOH", backend, ctx->s, status);
+ }
+ }
+
+ if (*worker->s->hcexpr &&
+ (cond = (hc_condition_t *)apr_table_get(ctx->conditions, worker->s->hcexpr)) != NULL) {
+ const char *err;
+ int ok = ap_expr_exec(r, cond->pexpr, &err);
+ if (ok > 0) {
+ status = OK;
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, ctx->s,
+ "Condition %s for %s (%s): passed", worker->s->hcexpr,
+ hc->s->name, worker->s->name);
+ } else if (ok < 0 || err) {
+ status = !OK;
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ctx->s, APLOGNO(03301)
+ "Error on checking condition %s for %s (%s): %s", worker->s->hcexpr,
+ hc->s->name, worker->s->name, err);
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, ctx->s,
+ "Condition %s for %s (%s) : failed", worker->s->hcexpr,
+ hc->s->name, worker->s->name);
+ status = !OK;
+ }
+ } else if (r->status < 200 || r->status > 399) {
+ status = !OK;
+ }
+ return backend_cleanup("HCOH", backend, ctx->s, status);
+}
+
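+/* Run one health check for baton->worker (inline, or as a thread pool
+ * task when threads are enabled), then update the shared pass/fail
+ * counters and set/clear PROXY_WORKER_HC_FAIL accordingly.
+ */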
+static void *hc_check(apr_thread_t *thread, void *b)
+{
+ baton_t *baton = (baton_t *)b;
+ sctx_t *ctx = baton->ctx;
+ apr_time_t now = baton->now;
+ proxy_worker *worker = baton->worker;
+ apr_pool_t *ptemp = baton->ptemp;
+ server_rec *s = ctx->s;
+ apr_status_t rv;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03256)
+ "%sHealth checking %s", (thread ? "Threaded " : ""), worker->s->name);
+
+ switch (worker->s->method) {
+ case TCP:
+ rv = hc_check_tcp(ctx, ptemp, worker);
+ break;
+
+ case OPTIONS:
+ case HEAD:
+ case GET:
+ rv = hc_check_http(ctx, ptemp, worker);
+ break;
+
+ default:
+ rv = APR_ENOTIMPL;
+ break;
+ }
+ if (rv == APR_ENOTIMPL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(03257)
+ "Somehow tried to use unimplemented hcheck method: %d",
+ (int)worker->s->method);
+ apr_pool_destroy(ptemp);
+ return NULL;
+ }
+ /* what state are we in ? */
+ if (PROXY_WORKER_IS_HCFAILED(worker)) {
+ if (rv == APR_SUCCESS) {
+ worker->s->pcount += 1;
+ if (worker->s->pcount >= worker->s->passes) {
+ ap_proxy_set_wstatus(PROXY_WORKER_HC_FAIL_FLAG, 0, worker);
+ ap_proxy_set_wstatus(PROXY_WORKER_IN_ERROR_FLAG, 0, worker);
+ worker->s->pcount = 0;
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03302)
+ "%sHealth check ENABLING %s", (thread ? "Threaded " : ""),
+ worker->s->name);
+
+ }
+ }
+ } else {
+ if (rv != APR_SUCCESS) {
+ worker->s->error_time = now;
+ worker->s->fcount += 1;
+ if (worker->s->fcount >= worker->s->fails) {
+ ap_proxy_set_wstatus(PROXY_WORKER_HC_FAIL_FLAG, 1, worker);
+ worker->s->fcount = 0;
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03303)
+ "%sHealth check DISABLING %s", (thread ? "Threaded " : ""),
+ worker->s->name);
+ }
+ }
+ }
+ worker->s->updated = now;
+ apr_pool_destroy(ptemp);
+ return NULL;
+}
+
+static apr_status_t hc_watchdog_callback(int state, void *data,
+ apr_pool_t *pool)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_time_t now = apr_time_now();
+ proxy_balancer *balancer;
+ sctx_t *ctx = (sctx_t *)data;
+ server_rec *s = ctx->s;
+ proxy_server_conf *conf;
+ switch (state) {
+ case AP_WATCHDOG_STATE_STARTING:
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03258)
+ "%s watchdog started.",
+ HCHECK_WATHCHDOG_NAME);
+#if HC_USE_THREADS
+ if (ctx->tpsize) {
+ rv = apr_thread_pool_create(&ctx->hctp, ctx->tpsize,
+ ctx->tpsize, ctx->p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, rv, s, APLOGNO(03312)
+ "apr_thread_pool_create() with %d threads failed",
+ ctx->tpsize);
+                /* we can continue without the thread pool */
+ ctx->hctp = NULL;
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(03313)
+ "apr_thread_pool_create() with %d threads succeeded",
+ ctx->tpsize);
+ }
+ } else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(03314)
+ "Skipping apr_thread_pool_create()");
+ ctx->hctp = NULL;
+ }
+
+#endif
+ break;
+
+ case AP_WATCHDOG_STATE_RUNNING:
+        /* loop through all balancers and their workers */
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s,
+ "Run of %s watchdog.",
+ HCHECK_WATHCHDOG_NAME);
+ if (s) {
+ int i;
+ conf = (proxy_server_conf *) ap_get_module_config(s->module_config, &proxy_module);
+ balancer = (proxy_balancer *)conf->balancers->elts;
+ for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
+ int n;
+ proxy_worker **workers;
+ proxy_worker *worker;
+ /* Have any new balancers or workers been added dynamically? */
+ ap_proxy_sync_balancer(balancer, s, conf);
+ workers = (proxy_worker **)balancer->workers->elts;
+ for (n = 0; n < balancer->workers->nelts; n++) {
+ worker = *workers;
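+                    /* Only schedule a check for workers that are not
+                     * stopped, have a check method configured, and whose
+                     * check interval has elapsed. */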
+ if (!PROXY_WORKER_IS(worker, PROXY_WORKER_STOPPED) &&
+ (worker->s->method != NONE) &&
+ (now > worker->s->updated + worker->s->interval)) {
+ baton_t *baton;
+ /* This pool must last the lifetime of the (possible) thread */
+ apr_pool_t *ptemp;
+ apr_pool_create(&ptemp, ctx->p);
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s,
+ "Checking %s worker: %s [%d] (%pp)", balancer->s->name,
+ worker->s->name, worker->s->method, worker);
+
+ if ((rv = hc_init_worker(ctx, worker)) != APR_SUCCESS) {
+ return rv;
+ }
+ baton = apr_palloc(ptemp, sizeof(baton_t));
+ baton->ctx = ctx;
+ baton->now = now;
+ baton->worker = worker;
+ baton->ptemp = ptemp;
+
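+                    /* Without a thread pool, run the check inline;
+                     * otherwise push the baton onto the pool. */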
+ if (!ctx->hctp) {
+ hc_check(NULL, baton);
+ }
+#if HC_USE_THREADS
+ else {
+ rv = apr_thread_pool_push(ctx->hctp, hc_check, (void *)baton,
+ APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
+ }
+#endif
+ }
+ workers++;
+ }
+ }
+ /* s = s->next; */
+ }
+ break;
+
+ case AP_WATCHDOG_STATE_STOPPING:
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03261)
+                         "Stopping %s watchdog.",
+ HCHECK_WATHCHDOG_NAME);
+#if HC_USE_THREADS
+ rv = apr_thread_pool_destroy(ctx->hctp);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, rv, s, APLOGNO(03315)
+ "apr_thread_pool_destroy() failed");
+ }
+#endif
+ ctx->hctp = NULL;
+ break;
+ }
+ return rv;
+}
+
+static int hc_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ apr_status_t rv;
+ sctx_t *ctx;
+
+ APR_OPTIONAL_FN_TYPE(ap_watchdog_get_instance) *hc_watchdog_get_instance;
+ APR_OPTIONAL_FN_TYPE(ap_watchdog_register_callback) *hc_watchdog_register_callback;
+
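+    /* mod_watchdog exports its API as optional functions; if they cannot
+     * be resolved, mod_watchdog is not loaded and we bail out. */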
+ hc_watchdog_get_instance = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_get_instance);
+ hc_watchdog_register_callback = APR_RETRIEVE_OPTIONAL_FN(ap_watchdog_register_callback);
+ if (!hc_watchdog_get_instance || !hc_watchdog_register_callback) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(03262)
+ "mod_watchdog is required");
+ return !OK;
+ }
+ ctx = (sctx_t *) ap_get_module_config(s->module_config,
+ &proxy_hcheck_module);
+
+ rv = hc_watchdog_get_instance(&ctx->watchdog,
+ HCHECK_WATHCHDOG_NAME,
+ 0, 1, p);
+ if (rv) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(03263)
+ "Failed to create watchdog instance (%s)",
+ HCHECK_WATHCHDOG_NAME);
+ return !OK;
+ }
+ rv = hc_watchdog_register_callback(ctx->watchdog,
+ apr_time_from_sec(HCHECK_WATHCHDOG_INTERVAL),
+ ctx,
+ hc_watchdog_callback);
+ if (rv) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(03264)
+ "Failed to register watchdog callback (%s)",
+ HCHECK_WATHCHDOG_NAME);
+ return !OK;
+ }
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03265)
+ "watchdog callback registered (%s)", HCHECK_WATHCHDOG_NAME);
+ return OK;
+}
+
+static void hc_show_exprs(request_rec *r)
+{
+ const apr_table_entry_t *elts;
+ const apr_array_header_t *hdr;
+ int i;
+ sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config,
+ &proxy_hcheck_module);
+ if (apr_is_empty_table(ctx->conditions))
+ return;
+
+ ap_rputs("\n\n<table>"
+             "<tr><th colspan='2'>Health check condition expressions:</th></tr>\n"
+ "<tr><th>Expr name</th><th>Expression</th></tr>\n", r);
+
+ hdr = apr_table_elts(ctx->conditions);
+ elts = (const apr_table_entry_t *) hdr->elts;
+ for (i = 0; i < hdr->nelts; ++i) {
+ hc_condition_t *cond;
+ if (!elts[i].key) {
+ continue;
+ }
+ cond = (hc_condition_t *)elts[i].val;
+ ap_rprintf(r, "<tr><td>%s</td><td>%s</td></tr>\n",
+ ap_escape_html(r->pool, elts[i].key),
+ ap_escape_html(r->pool, cond->expr));
+ }
+ ap_rputs("</table><hr/>\n", r);
+}
+
+static void hc_select_exprs(request_rec *r, const char *expr)
+{
+ const apr_table_entry_t *elts;
+ const apr_array_header_t *hdr;
+ int i;
+ sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config,
+ &proxy_hcheck_module);
+ if (apr_is_empty_table(ctx->conditions))
+ return;
+
+ hdr = apr_table_elts(ctx->conditions);
+ elts = (const apr_table_entry_t *) hdr->elts;
+ for (i = 0; i < hdr->nelts; ++i) {
+ if (!elts[i].key) {
+ continue;
+ }
+ ap_rprintf(r, "<option value='%s' %s >%s</option>\n",
+ ap_escape_html(r->pool, elts[i].key),
+ (!strcmp(elts[i].key, expr)) ? "selected" : "",
+ ap_escape_html(r->pool, elts[i].key));
+ }
+}
+
+static int hc_valid_expr(request_rec *r, const char *expr)
+{
+ const apr_table_entry_t *elts;
+ const apr_array_header_t *hdr;
+ int i;
+ sctx_t *ctx = (sctx_t *) ap_get_module_config(r->server->module_config,
+ &proxy_hcheck_module);
+ if (apr_is_empty_table(ctx->conditions))
+ return 0;
+
+ hdr = apr_table_elts(ctx->conditions);
+ elts = (const apr_table_entry_t *) hdr->elts;
+ for (i = 0; i < hdr->nelts; ++i) {
+ if (!elts[i].key) {
+ continue;
+ }
+ if (!strcmp(elts[i].key, expr))
+ return 1;
+ }
+ return 0;
+}
+
+static const char *hc_get_body(request_rec *r)
+{
+ apr_off_t length;
+ apr_size_t len;
+ apr_status_t rv;
+ char *buf;
+
+ if (!r || !r->kept_body)
+ return "";
+
+ rv = apr_brigade_length(r->kept_body, 1, &length);
+ len = (apr_size_t)length;
+ if (rv != APR_SUCCESS || len == 0)
+ return "";
+
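+    /* Flatten the kept body brigade into a NUL-terminated string. */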
+ buf = apr_palloc(r->pool, len + 1);
+ rv = apr_brigade_flatten(r->kept_body, buf, &len);
+ if (rv != APR_SUCCESS)
+ return "";
+    buf[len] = '\0'; /* ensure NUL termination */
+ return (const char*)buf;
+}
+
+static const char *hc_expr_var_fn(ap_expr_eval_ctx_t *ctx, const void *data)
+{
+ char *var = (char *)data;
+
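+    /* Resolve %{HC_BODY} to the body saved in r->kept_body. */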
+ if (var && *var && ctx->r && strcasecmp(var, "BODY") == 0) {
+ return hc_get_body(ctx->r);
+ }
+ return NULL;
+}
+
+static const char *hc_expr_func_fn(ap_expr_eval_ctx_t *ctx, const void *data,
+ const char *arg)
+{
+ char *var = (char *)arg;
+
+ if (var && *var && ctx->r && strcasecmp(var, "BODY") == 0) {
+ return hc_get_body(ctx->r);
+ }
+ return NULL;
+}
+
+static int hc_expr_lookup(ap_expr_lookup_parms *parms)
+{
+ switch (parms->type) {
+ case AP_EXPR_FUNC_VAR:
+        /* For now, handle any variable that starts with HC_
+         * (e.g. %{HC_BODY}).
+         */
+ if (strncasecmp(parms->name, "HC_", 3) == 0) {
+ *parms->func = hc_expr_var_fn;
+ *parms->data = parms->name + 3;
+ return OK;
+ }
+ break;
+ case AP_EXPR_FUNC_STRING:
+        /* The HC() expression function is provided by this module.
+         */
+ if (strcasecmp(parms->name, "HC") == 0) {
+ *parms->func = hc_expr_func_fn;
+ *parms->data = parms->arg;
+ return OK;
+ }
+ break;
+ }
+ return DECLINED;
+}
+
+static const command_rec command_table[] = {
+ AP_INIT_RAW_ARGS("ProxyHCTemplate", set_hc_template, NULL, OR_FILEINFO,
+ "Health check template"),
+ AP_INIT_RAW_ARGS("ProxyHCExpr", set_hc_condition, NULL, OR_FILEINFO,
+ "Define a health check condition ruleset expression"),
+#if HC_USE_THREADS
+ AP_INIT_TAKE1("ProxyHCTPsize", set_hc_tpsize, NULL, OR_FILEINFO,
+ "Set size of health check thread pool"),
+#endif
+ { NULL }
+};
+
+static void hc_register_hooks(apr_pool_t *p)
+{
+ static const char *const aszPre[] = { "mod_proxy_balancer.c", "mod_proxy.c", NULL};
+ static const char *const aszSucc[] = { "mod_watchdog.c", NULL};
+ APR_REGISTER_OPTIONAL_FN(set_worker_hc_param);
+ APR_REGISTER_OPTIONAL_FN(hc_show_exprs);
+ APR_REGISTER_OPTIONAL_FN(hc_select_exprs);
+ APR_REGISTER_OPTIONAL_FN(hc_valid_expr);
+ ap_hook_post_config(hc_post_config, aszPre, aszSucc, APR_HOOK_LAST);
+ ap_hook_expr_lookup(hc_expr_lookup, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+/* the main config structure */
+
+AP_DECLARE_MODULE(proxy_hcheck) =
+{
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-dir config structures */
+ NULL, /* merge per-dir config structures */
+ hc_create_config, /* create per-server config structures */
+ NULL, /* merge per-server config structures */
+ command_table, /* table of config file commands */
+ hc_register_hooks /* register hooks */
+};
diff --git a/modules/proxy/mod_proxy_http.mak b/modules/proxy/mod_proxy_http.mak
index 8849723c..c3811872 100644
--- a/modules/proxy/mod_proxy_http.mak
+++ b/modules/proxy/mod_proxy_http.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_http.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_http.so" /d LONG_NAME="proxy_http_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_http.so" /d LONG_NAME="proxy_http_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_http - Win32 Debug"
"$(INTDIR)\mod_proxy_http.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_http.so" /d LONG_NAME="proxy_http_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_http.so" /d LONG_NAME="proxy_http_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_scgi.c b/modules/proxy/mod_proxy_scgi.c
index 2cbe8483..cede817a 100644
--- a/modules/proxy/mod_proxy_scgi.c
+++ b/modules/proxy/mod_proxy_scgi.c
@@ -509,7 +509,7 @@ static int scgi_request_status(int *status, request_rec *r)
*status = HTTP_INTERNAL_SERVER_ERROR;
return *status;
}
- } while(0);
+ } while (0);
return OK;
/* break; */
@@ -530,7 +530,7 @@ static int scgi_handler(request_rec *r, proxy_worker *worker,
int status;
proxy_conn_rec *backend = NULL;
apr_pool_t *p = r->pool;
- apr_uri_t *uri = apr_palloc(r->pool, sizeof(*uri));
+ apr_uri_t *uri;
char dummy;
if (strncasecmp(url, SCHEME "://", sizeof(SCHEME) + 2)) {
@@ -548,6 +548,7 @@ static int scgi_handler(request_rec *r, proxy_worker *worker,
backend->is_ssl = 0;
/* Step One: Determine Who To Connect To */
+ uri = apr_palloc(p, sizeof(*uri));
status = ap_proxy_determine_connection(p, r, conf, worker, backend,
uri, &url, proxyname, proxyport,
&dummy, 1);
diff --git a/modules/proxy/mod_proxy_scgi.mak b/modules/proxy/mod_proxy_scgi.mak
index c44270a0..7ffb2484 100644
--- a/modules/proxy/mod_proxy_scgi.mak
+++ b/modules/proxy/mod_proxy_scgi.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_scgi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_scgi.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_scgi.so" /d LONG_NAME="proxy_scgi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_scgi.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_scgi.so" /d LONG_NAME="proxy_scgi_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_scgi - Win32 Debug"
"$(INTDIR)\mod_proxy_scgi.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_scgi.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_scgi.so" /d LONG_NAME="proxy_scgi_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_scgi.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_scgi.so" /d LONG_NAME="proxy_scgi_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/mod_proxy_wstunnel.mak b/modules/proxy/mod_proxy_wstunnel.mak
index 635de591..530715fe 100644
--- a/modules/proxy/mod_proxy_wstunnel.mak
+++ b/modules/proxy/mod_proxy_wstunnel.mak
@@ -363,14 +363,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_proxy_wstunnel.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_wstunnel.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_wstunnel.so" /d LONG_NAME="proxy_wstunnel_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_wstunnel.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_wstunnel.so" /d LONG_NAME="proxy_wstunnel_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_proxy_wstunnel - Win32 Debug"
"$(INTDIR)\mod_proxy_wstunnel.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_wstunnel.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_wstunnel.so" /d LONG_NAME="proxy_wstunnel_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_wstunnel.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_wstunnel.so" /d LONG_NAME="proxy_wstunnel_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 763073c1..0d2c8563 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -54,24 +54,6 @@ typedef struct {
const char *proxy_auth; /* Proxy authorization */
} forward_info;
-/* Keep synced with mod_proxy.h! */
-static struct wstat {
- unsigned int bit;
- char flag;
- const char *name;
-} wstat_tbl[] = {
- {PROXY_WORKER_INITIALIZED, PROXY_WORKER_INITIALIZED_FLAG, "Init "},
- {PROXY_WORKER_IGNORE_ERRORS, PROXY_WORKER_IGNORE_ERRORS_FLAG, "Ign "},
- {PROXY_WORKER_DRAIN, PROXY_WORKER_DRAIN_FLAG, "Drn "},
- {PROXY_WORKER_IN_SHUTDOWN, PROXY_WORKER_IN_SHUTDOWN_FLAG, "Shut "},
- {PROXY_WORKER_DISABLED, PROXY_WORKER_DISABLED_FLAG, "Dis "},
- {PROXY_WORKER_STOPPED, PROXY_WORKER_STOPPED_FLAG, "Stop "},
- {PROXY_WORKER_IN_ERROR, PROXY_WORKER_IN_ERROR_FLAG, "Err "},
- {PROXY_WORKER_HOT_STANDBY, PROXY_WORKER_HOT_STANDBY_FLAG, "Stby "},
- {PROXY_WORKER_FREE, PROXY_WORKER_FREE_FLAG, "Free "},
- {0x0, '\0', NULL}
-};
-
/* Global balancer counter */
int PROXY_DECLARE_DATA proxy_lb_workers = 0;
static int lb_workers_limit = 0;
@@ -1375,7 +1357,7 @@ static apr_status_t connection_cleanup(void *theconn)
* If the connection pool is NULL the worker
* cleanup has been run. Just return.
*/
- if (!worker->cp) {
+ if (!worker->cp->pool) {
return APR_SUCCESS;
}
@@ -1498,10 +1480,11 @@ static apr_status_t connection_constructor(void **resource, void *params,
static apr_status_t connection_destructor(void *resource, void *params,
apr_pool_t *pool)
{
- proxy_conn_rec *conn = (proxy_conn_rec *)resource;
+ proxy_worker *worker = params;
/* Destroy the pool only if not called from reslist_destroy */
- if (conn->worker->cp->pool) {
+ if (worker->cp->pool) {
+ proxy_conn_rec *conn = resource;
apr_pool_destroy(conn->pool);
}
@@ -1698,6 +1681,7 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
memset(wshared, 0, sizeof(proxy_worker_shared));
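+    /* Record the effective port before a default port is stripped below. */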
+ wshared->port = (uri.port ? uri.port : ap_proxy_port_of_scheme(uri.scheme));
if (uri.port && uri.port == ap_proxy_port_of_scheme(uri.scheme)) {
uri.port = 0;
}
@@ -1712,11 +1696,13 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
if (PROXY_STRNCPY(wshared->hostname, uri.hostname) != APR_SUCCESS) {
return apr_psprintf(p, "worker hostname (%s) too long", uri.hostname);
}
- wshared->port = uri.port;
wshared->flush_packets = flush_off;
wshared->flush_wait = PROXY_FLUSH_WAIT;
wshared->is_address_reusable = 1;
wshared->lbfactor = 1;
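+    /* Health check defaults: one pass re-enables, one failure disables,
+     * checked at the default watchdog interval. */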
+ wshared->passes = 1;
+ wshared->fails = 1;
+ wshared->interval = apr_time_from_sec(HCHECK_WATHCHDOG_DEFAULT_INTERVAL);
wshared->smax = -1;
wshared->hash.def = ap_proxy_hashfunc(wshared->name, PROXY_HASHFUNC_DEFAULT);
wshared->hash.fnv = ap_proxy_hashfunc(wshared->name, PROXY_HASHFUNC_FNV);
@@ -1730,6 +1716,9 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
else {
*wshared->uds_path = '\0';
}
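+    /* Workers defined outside a balancer ignore errors by default, so the
+     * retry logic below retries them without waiting for the retry timeout. */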
+ if (!balancer) {
+ wshared->status |= PROXY_WORKER_IGNORE_ERRORS;
+ }
(*worker)->hash = wshared->hash;
(*worker)->context = NULL;
@@ -1896,7 +1885,14 @@ static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worke
server_rec *s)
{
if (worker->s->status & PROXY_WORKER_IN_ERROR) {
- if (apr_time_now() > worker->s->error_time + worker->s->retry) {
+ if (PROXY_WORKER_IS(worker, PROXY_WORKER_STOPPED)) {
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03305)
+ "%s: Won't retry worker (%s): stopped",
+ proxy_function, worker->s->hostname);
+ return DECLINED;
+ }
+ if ((worker->s->status & PROXY_WORKER_IGNORE_ERRORS)
+ || apr_time_now() > worker->s->error_time + worker->s->retry) {
++worker->s->retries;
worker->s->status &= ~PROXY_WORKER_IN_ERROR;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00932)
@@ -2430,7 +2426,7 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
#endif
#if USE_ALTERNATE_IS_CONNECTED && defined(APR_MSG_PEEK)
-static int is_socket_connected(apr_socket_t *socket)
+PROXY_DECLARE(int) ap_proxy_is_socket_connected(apr_socket_t *socket)
{
apr_pollfd_t pfds[1];
apr_status_t status;
@@ -2468,7 +2464,7 @@ static int is_socket_connected(apr_socket_t *socket)
}
#else
-static int is_socket_connected(apr_socket_t *sock)
+PROXY_DECLARE(int) ap_proxy_is_socket_connected(apr_socket_t *sock)
{
apr_size_t buffer_len = 1;
@@ -2590,12 +2586,12 @@ static apr_status_t send_http_connect(proxy_conn_rec *backend,
}
-#if APR_HAVE_SYS_UN_H
/* TODO: In APR 2.x: Extend apr_sockaddr_t to possibly be a path !!! */
PROXY_DECLARE(apr_status_t) ap_proxy_connect_uds(apr_socket_t *sock,
const char *uds_path,
apr_pool_t *p)
{
+#if APR_HAVE_SYS_UN_H
apr_status_t rv;
apr_os_sock_t rawsock;
apr_interval_time_t t;
@@ -2637,8 +2633,10 @@ PROXY_DECLARE(apr_status_t) ap_proxy_connect_uds(apr_socket_t *sock,
}
return APR_SUCCESS;
-}
+#else
+ return APR_ENOTIMPL;
#endif
+}
PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
proxy_conn_rec *conn,
@@ -2657,7 +2655,7 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function,
(proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
if (conn->sock) {
- if (!(connected = is_socket_connected(conn->sock))) {
+ if (!(connected = ap_proxy_is_socket_connected(conn->sock))) {
/* This clears conn->scpool (and associated data), so backup and
* restore any ssl_hostname for this connection set earlier by
* ap_proxy_determine_connection().
@@ -3073,7 +3071,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_set_wstatus(char c, int set, proxy_worker *
{
unsigned int *status = &w->s->status;
char flag = toupper(c);
- struct wstat *pwt = wstat_tbl;
+ proxy_wstat_t *pwt = proxy_wstat_tbl;
while (pwt->bit) {
if (flag == pwt->flag) {
if (set)
@@ -3091,12 +3089,15 @@ PROXY_DECLARE(char *) ap_proxy_parse_wstatus(apr_pool_t *p, proxy_worker *w)
{
char *ret = "";
unsigned int status = w->s->status;
- struct wstat *pwt = wstat_tbl;
+ proxy_wstat_t *pwt = proxy_wstat_tbl;
while (pwt->bit) {
if (status & pwt->bit)
ret = apr_pstrcat(p, ret, pwt->name, NULL);
pwt++;
}
+ if (!*ret) {
+ ret = "??? ";
+ }
if (PROXY_WORKER_IS_USABLE(w))
ret = apr_pstrcat(p, ret, "Ok ", NULL);
return ret;
@@ -3498,7 +3499,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
}
proxy_run_fixups(r);
- ap_proxy_clear_connection(r, r->headers_in);
+ if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
+ return HTTP_BAD_REQUEST;
+ }
/* send request headers */
headers_in_array = apr_table_elts(r->headers_in);
@@ -3624,6 +3627,8 @@ static proxy_schemes_t pschemes[] =
{"fcgi", 8000},
{"ajp", AJP13_DEF_PORT},
{"scgi", SCGI_DEF_PORT},
+ {"h2c", DEFAULT_HTTP_PORT},
+ {"h2", DEFAULT_HTTPS_PORT},
{ NULL, 0xFFFF } /* unknown port */
};
@@ -3767,6 +3772,17 @@ PROXY_DECLARE(apr_status_t) ap_proxy_transfer_between_connections(
return rv;
}
+PROXY_DECLARE(const char *) ap_proxy_show_hcmethod(hcmethod_t method)
+{
+ proxy_hcmethods_t *m = proxy_hcmethods;
+ for (; m->name; m++) {
+ if (m->method == method) {
+ return m->name;
+ }
+ }
+ return "???";
+}
+
void proxy_util_register_hooks(apr_pool_t *p)
{
APR_REGISTER_OPTIONAL_FN(ap_proxy_retry_worker);
diff --git a/modules/session/mod_session.mak b/modules/session/mod_session.mak
index b2ecb4ba..ce92fa61 100644
--- a/modules/session/mod_session.mak
+++ b/modules/session/mod_session.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_session.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_session.so" /d LONG_NAME="session_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_session.so" /d LONG_NAME="session_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_session - Win32 Debug"
"$(INTDIR)\mod_session.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_session.so" /d LONG_NAME="session_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_session.so" /d LONG_NAME="session_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/session/mod_session_cookie.mak b/modules/session/mod_session_cookie.mak
index 9bdf1c68..014f1e3b 100644
--- a/modules/session/mod_session_cookie.mak
+++ b/modules/session/mod_session_cookie.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_session_cookie.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_cookie.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_session_cookie.so" /d LONG_NAME="session_cookie_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_cookie.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_session_cookie.so" /d LONG_NAME="session_cookie_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_session_cookie - Win32 Debug"
"$(INTDIR)\mod_session_cookie.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_cookie.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_session_cookie.so" /d LONG_NAME="session_cookie_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_cookie.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_session_cookie.so" /d LONG_NAME="session_cookie_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/session/mod_session_crypto.mak b/modules/session/mod_session_crypto.mak
index f80dacd1..13a4c67e 100644
--- a/modules/session/mod_session_crypto.mak
+++ b/modules/session/mod_session_crypto.mak
@@ -359,14 +359,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_session_crypto.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_crypto.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_session_crypto.so" /d LONG_NAME="session_crypto_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_crypto.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_session_crypto.so" /d LONG_NAME="session_crypto_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_session_crypto - Win32 Debug"
"$(INTDIR)\mod_session_crypto.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_crypto.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_session_crypto.so" /d LONG_NAME="session_crypto_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_crypto.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_session_crypto.so" /d LONG_NAME="session_crypto_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/session/mod_session_dbd.mak b/modules/session/mod_session_dbd.mak
index b72f91f1..e16c61ef 100644
--- a/modules/session/mod_session_dbd.mak
+++ b/modules/session/mod_session_dbd.mak
@@ -387,14 +387,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_session_dbd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_session_dbd.so" /d LONG_NAME="session_dbd_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_session_dbd.so" /d LONG_NAME="session_dbd_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_session_dbd - Win32 Debug"
"$(INTDIR)\mod_session_dbd.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_session_dbd.so" /d LONG_NAME="session_dbd_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_session_dbd.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_session_dbd.so" /d LONG_NAME="session_dbd_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/slotmem/mod_slotmem_plain.mak b/modules/slotmem/mod_slotmem_plain.mak
index 435ebf70..4e7891a0 100644
--- a/modules/slotmem/mod_slotmem_plain.mak
+++ b/modules/slotmem/mod_slotmem_plain.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_slotmem_plain.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_slotmem_plain.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_slotmem_plain.so" /d LONG_NAME="slotmem_plain_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_slotmem_plain.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_slotmem_plain.so" /d LONG_NAME="slotmem_plain_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_slotmem_plain - Win32 Debug"
"$(INTDIR)\mod_slotmem_plain.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_slotmem_plain.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_slotmem_plain.so" /d LONG_NAME="slotmem_plain_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_slotmem_plain.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_slotmem_plain.so" /d LONG_NAME="slotmem_plain_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/slotmem/mod_slotmem_shm.mak b/modules/slotmem/mod_slotmem_shm.mak
index c41bf4ee..e7e64b8e 100644
--- a/modules/slotmem/mod_slotmem_shm.mak
+++ b/modules/slotmem/mod_slotmem_shm.mak
@@ -331,14 +331,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_slotmem_shm.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_slotmem_shm.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_slotmem_shm.so" /d LONG_NAME="slotmem_shm_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_slotmem_shm.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_slotmem_shm.so" /d LONG_NAME="slotmem_shm_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_slotmem_shm - Win32 Debug"
"$(INTDIR)\mod_slotmem_shm.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_slotmem_shm.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_slotmem_shm.so" /d LONG_NAME="slotmem_shm_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_slotmem_shm.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_slotmem_shm.so" /d LONG_NAME="slotmem_shm_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c
index 219e3337..f8e71b33 100644
--- a/modules/ssl/mod_ssl.c
+++ b/modules/ssl/mod_ssl.c
@@ -119,7 +119,7 @@ static const command_rec ssl_config_cmds[] = {
SSL_CMD_SRV(CARevocationFile, TAKE1,
"SSL CA Certificate Revocation List (CRL) file "
"('/path/to/file' - PEM encoded)")
- SSL_CMD_SRV(CARevocationCheck, TAKE1,
+ SSL_CMD_SRV(CARevocationCheck, RAW_ARGS,
"SSL CA Certificate Revocation List (CRL) checking mode")
SSL_CMD_ALL(VerifyClient, TAKE1,
"SSL Client verify type "
@@ -197,7 +197,7 @@ static const command_rec ssl_config_cmds[] = {
SSL_CMD_SRV(ProxyCARevocationFile, TAKE1,
"SSL Proxy: CA Certificate Revocation List (CRL) file "
"('/path/to/file' - PEM encoded)")
- SSL_CMD_SRV(ProxyCARevocationCheck, TAKE1,
+ SSL_CMD_SRV(ProxyCARevocationCheck, RAW_ARGS,
"SSL Proxy: CA Certificate Revocation List (CRL) checking mode")
SSL_CMD_SRV(ProxyMachineCertificateFile, TAKE1,
"SSL Proxy: file containing client certificates "
diff --git a/modules/ssl/mod_ssl.mak b/modules/ssl/mod_ssl.mak
index 6826ba5c..a3bd304a 100644
--- a/modules/ssl/mod_ssl.mak
+++ b/modules/ssl/mod_ssl.mak
@@ -470,14 +470,14 @@ SOURCE=..\..\build\win32\httpd.rc
"$(INTDIR)\mod_ssl.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ssl.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "NDEBUG" /d BIN_NAME="mod_ssl.so" /d LONG_NAME="proxy_ssl_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ssl.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_ssl.so" /d LONG_NAME="proxy_ssl_module for Apache" $(SOURCE)
!ELSEIF "$(CFG)" == "mod_ssl - Win32 Debug"
"$(INTDIR)\mod_ssl.res" : $(SOURCE) "$(INTDIR)"
- $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ssl.res" /i "../../include" /i "../../srclib/apr/include" /i "\local0\asf\build\httpd-2.4\build\win32" /d "_DEBUG" /d BIN_NAME="mod_ssl.so" /d LONG_NAME="proxy_ssl_module for Apache" $(SOURCE)
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_ssl.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_ssl.so" /d LONG_NAME="proxy_ssl_module for Apache" $(SOURCE)
!ENDIF
diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c
index a3d5af52..129a01ff 100644
--- a/modules/ssl/ssl_engine_config.c
+++ b/modules/ssl/ssl_engine_config.c
@@ -121,7 +121,7 @@ static void modssl_ctx_init(modssl_ctx_t *mctx, apr_pool_t *p)
mctx->crl_path = NULL;
mctx->crl_file = NULL;
- mctx->crl_check_mode = SSL_CRLCHECK_UNSET;
+ mctx->crl_check_mask = UNSET;
mctx->auth.ca_cert_path = NULL;
mctx->auth.ca_cert_file = NULL;
@@ -271,7 +271,7 @@ static void modssl_ctx_cfg_merge(apr_pool_t *p,
cfgMerge(crl_path, NULL);
cfgMerge(crl_file, NULL);
- cfgMerge(crl_check_mode, SSL_CRLCHECK_UNSET);
+ cfgMergeInt(crl_check_mask);
cfgMergeString(auth.ca_cert_path);
cfgMergeString(auth.ca_cert_file);
@@ -1000,23 +1000,38 @@ const char *ssl_cmd_SSLCARevocationFile(cmd_parms *cmd,
static const char *ssl_cmd_crlcheck_parse(cmd_parms *parms,
const char *arg,
- ssl_crlcheck_t *mode)
+ int *mask)
{
- if (strcEQ(arg, "none")) {
- *mode = SSL_CRLCHECK_NONE;
+ const char *w;
+
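+    /* The first word selects the base checking mode: none, leaf or chain. */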
+ w = ap_getword_conf(parms->temp_pool, &arg);
+ if (strcEQ(w, "none")) {
+ *mask = SSL_CRLCHECK_NONE;
}
- else if (strcEQ(arg, "leaf")) {
- *mode = SSL_CRLCHECK_LEAF;
+ else if (strcEQ(w, "leaf")) {
+ *mask = SSL_CRLCHECK_LEAF;
}
- else if (strcEQ(arg, "chain")) {
- *mode = SSL_CRLCHECK_CHAIN;
+ else if (strcEQ(w, "chain")) {
+ *mask = SSL_CRLCHECK_CHAIN;
}
else {
return apr_pstrcat(parms->temp_pool, parms->cmd->name,
- ": Invalid argument '", arg, "'",
+ ": Invalid argument '", w, "'",
NULL);
}
+ while (*arg) {
+ w = ap_getword_conf(parms->temp_pool, &arg);
+ if (strcEQ(w, "no_crl_for_cert_ok")) {
+ *mask |= SSL_CRLCHECK_NO_CRL_FOR_CERT_OK;
+ }
+ else {
+ return apr_pstrcat(parms->temp_pool, parms->cmd->name,
+ ": Invalid argument '", w, "'",
+ NULL);
+ }
+ }
+
return NULL;
}
@@ -1026,7 +1041,7 @@ const char *ssl_cmd_SSLCARevocationCheck(cmd_parms *cmd,
{
SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
- return ssl_cmd_crlcheck_parse(cmd, arg, &sc->server->crl_check_mode);
+ return ssl_cmd_crlcheck_parse(cmd, arg, &sc->server->crl_check_mask);
}
static const char *ssl_cmd_verify_parse(cmd_parms *parms,
@@ -1540,7 +1555,7 @@ const char *ssl_cmd_SSLProxyCARevocationCheck(cmd_parms *cmd,
{
SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
- return ssl_cmd_crlcheck_parse(cmd, arg, &sc->proxy->crl_check_mode);
+ return ssl_cmd_crlcheck_parse(cmd, arg, &sc->proxy->crl_check_mask);
}
const char *ssl_cmd_SSLProxyMachineCertificateFile(cmd_parms *cmd,
diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c
index 797fbd12..9adca48a 100644
--- a/modules/ssl/ssl_engine_init.c
+++ b/modules/ssl/ssl_engine_init.c
@@ -787,14 +787,20 @@ static apr_status_t ssl_init_ctx_crl(server_rec *s,
X509_STORE *store = SSL_CTX_get_cert_store(mctx->ssl_ctx);
unsigned long crlflags = 0;
char *cfgp = mctx->pkp ? "SSLProxy" : "SSL";
+ int crl_check_mode;
+
+ if (mctx->crl_check_mask == UNSET) {
+ mctx->crl_check_mask = SSL_CRLCHECK_NONE;
+ }
+ crl_check_mode = mctx->crl_check_mask & ~SSL_CRLCHECK_FLAGS;
/*
* Configure Certificate Revocation List (CRL) Details
*/
if (!(mctx->crl_file || mctx->crl_path)) {
- if (mctx->crl_check_mode == SSL_CRLCHECK_LEAF ||
- mctx->crl_check_mode == SSL_CRLCHECK_CHAIN) {
+ if (crl_check_mode == SSL_CRLCHECK_LEAF ||
+ crl_check_mode == SSL_CRLCHECK_CHAIN) {
ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01899)
"Host %s: CRL checking has been enabled, but "
"neither %sCARevocationFile nor %sCARevocationPath "
@@ -816,7 +822,7 @@ static apr_status_t ssl_init_ctx_crl(server_rec *s,
return ssl_die(s);
}
- switch (mctx->crl_check_mode) {
+ switch (crl_check_mode) {
case SSL_CRLCHECK_LEAF:
crlflags = X509_V_FLAG_CRL_CHECK;
break;
diff --git a/modules/ssl/ssl_engine_io.c b/modules/ssl/ssl_engine_io.c
index 77c48482..ea231932 100644
--- a/modules/ssl/ssl_engine_io.c
+++ b/modules/ssl/ssl_engine_io.c
@@ -1092,6 +1092,9 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx)
if (sslconn->is_proxy) {
#ifdef HAVE_TLSEXT
apr_ipsubnet_t *ip;
+#ifdef HAVE_TLS_ALPN
+ const char *alpn_note;
+#endif
#endif
const char *hostname_note = apr_table_get(c->notes,
"proxy-request-hostname");
@@ -1101,6 +1104,41 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx)
sc = mySrvConfig(server);
#ifdef HAVE_TLSEXT
+#ifdef HAVE_TLS_ALPN
+ alpn_note = apr_table_get(c->notes, "proxy-request-alpn-protos");
+ if (alpn_note) {
+ char *protos, *s, *p, *last;
+ apr_size_t len;
+
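+            /* Rewrite the comma/space separated protocol list into ALPN
+             * wire format: each protocol preceded by a one-byte length. */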
+ s = protos = apr_pcalloc(c->pool, strlen(alpn_note)+1);
+ p = apr_pstrdup(c->pool, alpn_note);
+ while ((p = apr_strtok(p, ", ", &last))) {
+ len = last - p - (*last? 1 : 0);
+ if (len > 255) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(03309)
+ "ALPN proxy protocol identifier too long: %s",
+ p);
+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, server);
+ return APR_EGENERAL;
+ }
+ *s++ = (unsigned char)len;
+ while (len--) {
+ *s++ = *p++;
+ }
+ p = NULL;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "setting alpn protos from '%s', protolen=%d",
+ alpn_note, (int)(s - protos));
+ if (protos != s && SSL_set_alpn_protos(filter_ctx->pssl,
+ (unsigned char *)protos,
+ s - protos)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(03310)
+ "error setting alpn protos from '%s'", alpn_note);
+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_WARNING, server);
+ }
+ }
+#endif /* defined HAVE_TLS_ALPN */
/*
* Enable SNI for backend requests. Make sure we don't do it for
* pure SSLv3 connections, and also prevent IP addresses
@@ -1151,6 +1189,8 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx)
}
}
if ((sc->proxy_ssl_check_peer_name != SSL_ENABLED_FALSE) &&
+ ((sc->proxy_ssl_check_peer_cn != SSL_ENABLED_FALSE) ||
+ (sc->proxy_ssl_check_peer_name == SSL_ENABLED_TRUE)) &&
hostname_note) {
apr_table_unset(c->notes, "proxy-request-hostname");
if (!cert
@@ -1162,7 +1202,7 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx)
"for hostname %s", hostname_note);
}
}
- else if ((sc->proxy_ssl_check_peer_cn != SSL_ENABLED_FALSE) &&
+ else if ((sc->proxy_ssl_check_peer_cn == SSL_ENABLED_TRUE) &&
hostname_note) {
const char *hostname;
int match = 0;
diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c
index 17fd7db3..8b6149d8 100644
--- a/modules/ssl/ssl_engine_kernel.c
+++ b/modules/ssl/ssl_engine_kernel.c
@@ -727,6 +727,7 @@ int ssl_hook_Access(request_rec *r)
* on this connection.
*/
apr_table_setn(r->notes, "ssl-renegotiate-forbidden", "verify-client");
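+        /* Restore the previous verify mode before rejecting the request. */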
+ SSL_set_verify(ssl, verify_old, ssl_callback_SSLVerify);
return HTTP_FORBIDDEN;
}
/* optimization */
@@ -1553,22 +1554,24 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx)
SSLDirConfigRec *dc = r ? myDirConfig(r) : NULL;
SSLConnRec *sslconn = myConnConfig(conn);
modssl_ctx_t *mctx = myCtxConfig(sslconn, sc);
+ int crl_check_mode = mctx->crl_check_mask & ~SSL_CRLCHECK_FLAGS;
/* Get verify ingredients */
int errnum = X509_STORE_CTX_get_error(ctx);
int errdepth = X509_STORE_CTX_get_error_depth(ctx);
int depth, verify;
+
/*
* Log verification information
*/
ssl_log_cxerror(SSLLOG_MARK, APLOG_DEBUG, 0, conn,
X509_STORE_CTX_get_current_cert(ctx), APLOGNO(02275)
"Certificate Verification, depth %d, "
- "CRL checking mode: %s", errdepth,
- mctx->crl_check_mode == SSL_CRLCHECK_CHAIN ?
- "chain" : (mctx->crl_check_mode == SSL_CRLCHECK_LEAF ?
- "leaf" : "none"));
+ "CRL checking mode: %s (%x)", errdepth,
+ crl_check_mode == SSL_CRLCHECK_CHAIN ? "chain" :
+ crl_check_mode == SSL_CRLCHECK_LEAF ? "leaf" : "none",
+ mctx->crl_check_mask);
/*
* Check for optionally acceptable non-verifiable issuer situation
@@ -1617,6 +1620,17 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx)
X509_STORE_CTX_set_error(ctx, -1);
}
+ if (!ok && errnum == X509_V_ERR_UNABLE_TO_GET_CRL
+ && (mctx->crl_check_mask & SSL_CRLCHECK_NO_CRL_FOR_CERT_OK)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, conn,
+ "Certificate Verification: Temporary error (%d): %s: "
+                      "optional, therefore we're accepting the certificate",
+ errnum, X509_verify_cert_error_string(errnum));
+ X509_STORE_CTX_set_error(ctx, X509_V_OK);
+ errnum = X509_V_OK;
+ ok = TRUE;
+ }
+
#ifndef OPENSSL_NO_OCSP
/*
* Perform OCSP-based revocation checks
diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h
index 70b3ac22..7f6f9fd9 100644
--- a/modules/ssl/ssl_private.h
+++ b/modules/ssl/ssl_private.h
@@ -343,13 +343,15 @@ typedef enum {
|| (errnum == X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE))
/**
- * CRL checking modes
+ * CRL checking mask (mode | flags)
*/
typedef enum {
- SSL_CRLCHECK_UNSET = UNSET,
- SSL_CRLCHECK_NONE = 0,
- SSL_CRLCHECK_LEAF = 1,
- SSL_CRLCHECK_CHAIN = 2
+ SSL_CRLCHECK_NONE = (0),
+ SSL_CRLCHECK_LEAF = (1 << 0),
+ SSL_CRLCHECK_CHAIN = (1 << 1),
+
+#define SSL_CRLCHECK_FLAGS (~0x3)
+ SSL_CRLCHECK_NO_CRL_FOR_CERT_OK = (1 << 2)
} ssl_crlcheck_t;
/**
@@ -607,7 +609,7 @@ typedef struct {
/** certificate revocation list */
const char *crl_path;
const char *crl_file;
- ssl_crlcheck_t crl_check_mode;
+ int crl_check_mask;
#ifdef HAVE_OCSP_STAPLING
/** OCSP stapling options */