Diffstat (limited to 'modules/http2')
-rw-r--r--  modules/http2/.gitignore  35
-rw-r--r--  modules/http2/NWGNUmakefile  246
-rw-r--r--  modules/http2/NWGNUmod_http2  415
-rw-r--r--  modules/http2/NWGNUproxyht2  287
-rw-r--r--  modules/http2/README.h2  70
-rw-r--r--  modules/http2/config2.m4  225
-rw-r--r--  modules/http2/h2.h  161
-rw-r--r--  modules/http2/h2_alt_svc.c  130
-rw-r--r--  modules/http2/h2_alt_svc.h  39
-rw-r--r--  modules/http2/h2_bucket_beam.c  1015
-rw-r--r--  modules/http2/h2_bucket_beam.h  363
-rw-r--r--  modules/http2/h2_bucket_eoc.c  110
-rw-r--r--  modules/http2/h2_bucket_eoc.h  32
-rw-r--r--  modules/http2/h2_bucket_eos.c  111
-rw-r--r--  modules/http2/h2_bucket_eos.h  31
-rw-r--r--  modules/http2/h2_config.c  568
-rw-r--r--  modules/http2/h2_config.h  95
-rw-r--r--  modules/http2/h2_conn.c  332
-rw-r--r--  modules/http2/h2_conn.h  76
-rw-r--r--  modules/http2/h2_conn_io.c  422
-rw-r--r--  modules/http2/h2_conn_io.h  76
-rw-r--r--  modules/http2/h2_ctx.c  120
-rw-r--r--  modules/http2/h2_ctx.h  77
-rw-r--r--  modules/http2/h2_filter.c  290
-rw-r--r--  modules/http2/h2_filter.h  51
-rw-r--r--  modules/http2/h2_from_h1.c  589
-rw-r--r--  modules/http2/h2_from_h1.h  75
-rw-r--r--  modules/http2/h2_h2.c  714
-rw-r--r--  modules/http2/h2_h2.h  78
-rw-r--r--  modules/http2/h2_mplx.c  1458
-rw-r--r--  modules/http2/h2_mplx.h  348
-rw-r--r--  modules/http2/h2_ngn_shed.c  366
-rw-r--r--  modules/http2/h2_ngn_shed.h  76
-rw-r--r--  modules/http2/h2_private.h  27
-rw-r--r--  modules/http2/h2_proxy_session.c  1368
-rw-r--r--  modules/http2/h2_proxy_session.h  111
-rw-r--r--  modules/http2/h2_proxy_util.c  705
-rw-r--r--  modules/http2/h2_proxy_util.h  181
-rw-r--r--  modules/http2/h2_push.c  1053
-rw-r--r--  modules/http2/h2_push.h  116
-rw-r--r--  modules/http2/h2_request.c  363
-rw-r--r--  modules/http2/h2_request.h  48
-rw-r--r--  modules/http2/h2_response.c  205
-rw-r--r--  modules/http2/h2_response.h  73
-rw-r--r--  modules/http2/h2_session.c  2353
-rw-r--r--  modules/http2/h2_session.h  241
-rw-r--r--  modules/http2/h2_stream.c  697
-rw-r--r--  modules/http2/h2_stream.h  280
-rw-r--r--  modules/http2/h2_stream_set.c  145
-rw-r--r--  modules/http2/h2_stream_set.h  51
-rw-r--r--  modules/http2/h2_switch.c  185
-rw-r--r--  modules/http2/h2_switch.h  29
-rw-r--r--  modules/http2/h2_task.c  829
-rw-r--r--  modules/http2/h2_task.h  132
-rw-r--r--  modules/http2/h2_util.c  1455
-rw-r--r--  modules/http2/h2_util.h  389
-rw-r--r--  modules/http2/h2_version.h  40
-rw-r--r--  modules/http2/h2_worker.c  103
-rw-r--r--  modules/http2/h2_worker.h  135
-rw-r--r--  modules/http2/h2_workers.c  416
-rw-r--r--  modules/http2/h2_workers.h  120
-rw-r--r--  modules/http2/mod_http2.c  356
-rw-r--r--  modules/http2/mod_http2.dep  1612
-rw-r--r--  modules/http2/mod_http2.dsp  203
-rw-r--r--  modules/http2/mod_http2.h  93
-rw-r--r--  modules/http2/mod_http2.mak  560
-rw-r--r--  modules/http2/mod_proxy_http2.c  650
-rw-r--r--  modules/http2/mod_proxy_http2.dep  208
-rw-r--r--  modules/http2/mod_proxy_http2.dsp  119
-rw-r--r--  modules/http2/mod_proxy_http2.h  20
-rw-r--r--  modules/http2/mod_proxy_http2.mak  427
71 files changed, 25179 insertions, 0 deletions
diff --git a/modules/http2/.gitignore b/modules/http2/.gitignore
new file mode 100644
index 00000000..ca49620f
--- /dev/null
+++ b/modules/http2/.gitignore
@@ -0,0 +1,35 @@
+*.xcuserstate
+sandbox/httpd/packages/httpd-2.4.x.tar.gz
+sandbox/test/conf/sites/mod-h2.greenbytes.de.conf
+*.o
+*.slo
+*.lo
+*.la
+*.pcap
+.libs
+.configured
+.deps
+compile
+aclocal.m4
+autom4te.cache
+autoscan.log
+config.guess
+config.log
+config.status
+config.sub
+config.h
+config.h.in
+config.h.in~
+configure
+configure.scan
+depcomp
+install-sh
+libtool
+ltmain.sh
+missing
+stamp-h1
+Makefile.in
+Makefile
+mod_h2-*.tar.gz
+mod_h2/h2_version.h
+m4
diff --git a/modules/http2/NWGNUmakefile b/modules/http2/NWGNUmakefile
new file mode 100644
index 00000000..d4a51ed3
--- /dev/null
+++ b/modules/http2/NWGNUmakefile
@@ -0,0 +1,246 @@
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME =
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION =
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME =
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE =
+
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/mod_http2.nlm \
+ $(OBJDIR)/proxyht2.nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs = \
+ $(EOLIST)
+
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/NWGNUmod_http2 b/modules/http2/NWGNUmod_http2
new file mode 100644
index 00000000..e9c48a40
--- /dev/null
+++ b/modules/http2/NWGNUmod_http2
@@ -0,0 +1,415 @@
+#
+# This Makefile requires the environment var NGH2SRC
+# pointing to the base directory of nghttp2 source tree.
+#
+
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(NGH2SRC)/lib/ \
+ $(NGH2SRC)/lib/includes \
+ $(SERVER)/mpm/NetWare \
+ $(STDMOD)/ssl \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ -DHAVE_CONFIG_H \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ -L$(OBJDIR) \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = mod_http2
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Support module (w/ NGHTTP2 Lib)
+
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = $(NLM_NAME)
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(OBJDIR)/nghttp2.lib \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/h2_alt_svc.o \
+ $(OBJDIR)/h2_bucket_beam.o \
+ $(OBJDIR)/h2_bucket_eoc.o \
+ $(OBJDIR)/h2_bucket_eos.o \
+ $(OBJDIR)/h2_config.o \
+ $(OBJDIR)/h2_conn.o \
+ $(OBJDIR)/h2_conn_io.o \
+ $(OBJDIR)/h2_ctx.o \
+ $(OBJDIR)/h2_filter.o \
+ $(OBJDIR)/h2_from_h1.o \
+ $(OBJDIR)/h2_h2.o \
+ $(OBJDIR)/h2_mplx.o \
+ $(OBJDIR)/h2_ngn_shed.o \
+ $(OBJDIR)/h2_push.o \
+ $(OBJDIR)/h2_request.o \
+ $(OBJDIR)/h2_response.o \
+ $(OBJDIR)/h2_session.o \
+ $(OBJDIR)/h2_stream.o \
+ $(OBJDIR)/h2_switch.o \
+ $(OBJDIR)/h2_task.o \
+ $(OBJDIR)/h2_util.o \
+ $(OBJDIR)/h2_worker.o \
+ $(OBJDIR)/h2_workers.o \
+ $(OBJDIR)/mod_http2.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(OBJDIR)/nghttp2.lib \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Libc \
+ Apache2 \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ @$(OBJDIR)/mod_http2.imp \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs := $(sort $(patsubst $(NGH2SRC)/lib/%.c,$(OBJDIR)/%.o,$(wildcard $(NGH2SRC)/lib/*.c)))
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(NGH2SRC)/lib/config.h $(TARGET_lib)
+
+nlms :: libs $(OBJDIR)/mod_http2.imp $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+clean ::
+ $(call DEL,$(NGH2SRC)/lib/config.h)
+#
+# Any specialized rules here
+#
+vpath %.c $(NGH2SRC)/lib
+
+$(NGH2SRC)/lib/config.h : NWGNUmod_http2
+ @-$(RM) $@
+ @echo $(DL)GEN $@$(DL)
+ @echo $(DL)/* For NetWare target.$(DL) > $@
+ @echo $(DL)** Do not edit - created by Make!$(DL) >> $@
+ @echo $(DL)*/$(DL) >> $@
+ @echo $(DL)#ifndef NGH2_CONFIG_H$(DL) >> $@
+ @echo $(DL)#define NGH2_CONFIG_H$(DL) >> $@
+ @echo #define HAVE_ARPA_INET_H 1 >> $@
+ @echo #define HAVE_CHOWN 1 >> $@
+ @echo #define HAVE_DECL_STRERROR_R 1 >> $@
+ @echo #define HAVE_DLFCN_H 1 >> $@
+ @echo #define HAVE_DUP2 1 >> $@
+ @echo #define HAVE_FCNTL_H 1 >> $@
+ @echo #define HAVE_GETCWD 1 >> $@
+ @echo #define HAVE_INTTYPES_H 1 >> $@
+ @echo #define HAVE_LIMITS_H 1 >> $@
+ @echo #define HAVE_LOCALTIME_R 1 >> $@
+ @echo #define HAVE_MALLOC 1 >> $@
+ @echo #define HAVE_MEMCHR 1 >> $@
+ @echo #define HAVE_MEMMOVE 1 >> $@
+ @echo #define HAVE_MEMORY_H 1 >> $@
+ @echo #define HAVE_MEMSET 1 >> $@
+ @echo #define HAVE_NETDB_H 1 >> $@
+ @echo #define HAVE_NETINET_IN_H 1 >> $@
+ @echo #define HAVE_PTRDIFF_T 1 >> $@
+ @echo #define HAVE_PWD_H 1 >> $@
+ @echo #define HAVE_SOCKET 1 >> $@
+ @echo #define HAVE_SQRT 1 >> $@
+ @echo #define HAVE_STDDEF_H 1 >> $@
+ @echo #define HAVE_STDINT_H 1 >> $@
+ @echo #define HAVE_STDLIB_H 1 >> $@
+ @echo #define HAVE_STRCHR 1 >> $@
+ @echo #define HAVE_STRDUP 1 >> $@
+ @echo #define HAVE_STRERROR 1 >> $@
+ @echo #define HAVE_STRERROR_R 1 >> $@
+ @echo #define HAVE_STRINGS_H 1 >> $@
+ @echo #define HAVE_STRING_H 1 >> $@
+ @echo #define HAVE_STRSTR 1 >> $@
+ @echo #define HAVE_STRTOL 1 >> $@
+ @echo #define HAVE_STRTOUL 1 >> $@
+ @echo #define HAVE_SYSLOG_H 1 >> $@
+ @echo #define HAVE_SYS_SOCKET_H 1 >> $@
+ @echo #define HAVE_SYS_STAT_H 1 >> $@
+ @echo #define HAVE_SYS_TIME_H 1 >> $@
+ @echo #define HAVE_SYS_TYPES_H 1 >> $@
+ @echo #define HAVE_TIME_H 1 >> $@
+ @echo #define HAVE_UNISTD_H 1 >> $@
+
+ @echo #define SIZEOF_INT_P 4 >> $@
+ @echo #define STDC_HEADERS 1 >> $@
+ @echo #define STRERROR_R_CHAR_P 4 >> $@
+
+# Hint to compiler a function parameter is not used
+ @echo #define _U_ >> $@
+
+ @echo #ifndef __cplusplus >> $@
+ @echo #define inline __inline >> $@
+ @echo #endif >> $@
+
+ @echo $(DL)#endif /* NGH2_CONFIG_H */$(DL) >> $@
+
+#
+# Exports from mod_http2 for mod_proxy_http2
+$(OBJDIR)/mod_http2.imp : NWGNUmod_http2
+ @-$(RM) $@
+ @echo $(DL)GEN $@$(DL)
+ @echo $(DL) (HTTP2)$(DL) > $@
+ @echo $(DL) http2_module,$(DL) >> $@
+ @echo $(DL) h2_ihash_add,$(DL) >> $@
+ @echo $(DL) h2_ihash_clear,$(DL) >> $@
+ @echo $(DL) h2_ihash_count,$(DL) >> $@
+ @echo $(DL) h2_ihash_create,$(DL) >> $@
+ @echo $(DL) h2_ihash_empty,$(DL) >> $@
+ @echo $(DL) h2_ihash_iter,$(DL) >> $@
+ @echo $(DL) h2_ihash_remove,$(DL) >> $@
+ @echo $(DL) h2_iq_add,$(DL) >> $@
+ @echo $(DL) h2_iq_create,$(DL) >> $@
+ @echo $(DL) h2_iq_remove,$(DL) >> $@
+ @echo $(DL) h2_log2,$(DL) >> $@
+ @echo $(DL) h2_proxy_res_ignore_header,$(DL) >> $@
+ @echo $(DL) h2_headers_add_h1,$(DL) >> $@
+ @echo $(DL) h2_req_create,$(DL) >> $@
+ @echo $(DL) h2_req_createn,$(DL) >> $@
+ @echo $(DL) h2_req_make,$(DL) >> $@
+ @echo $(DL) h2_util_camel_case_header,$(DL) >> $@
+ @echo $(DL) h2_util_frame_print,$(DL) >> $@
+ @echo $(DL) h2_util_ngheader_make_req,$(DL) >> $@
+ @echo $(DL) nghttp2_is_fatal,$(DL) >> $@
+ @echo $(DL) nghttp2_option_del,$(DL) >> $@
+ @echo $(DL) nghttp2_option_new,$(DL) >> $@
+ @echo $(DL) nghttp2_option_set_no_auto_window_update,$(DL) >> $@
+ @echo $(DL) nghttp2_option_set_peer_max_concurrent_streams,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_del,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_new,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_before_frame_send_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_data_chunk_recv_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_frame_recv_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_header_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_on_stream_close_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_callbacks_set_send_callback,$(DL) >> $@
+ @echo $(DL) nghttp2_session_client_new2,$(DL) >> $@
+ @echo $(DL) nghttp2_session_consume,$(DL) >> $@
+ @echo $(DL) nghttp2_session_consume_connection,$(DL) >> $@
+ @echo $(DL) nghttp2_session_del,$(DL) >> $@
+ @echo $(DL) nghttp2_session_get_remote_settings,$(DL) >> $@
+ @echo $(DL) nghttp2_session_get_stream_user_data,$(DL) >> $@
+ @echo $(DL) nghttp2_session_mem_recv,$(DL) >> $@
+ @echo $(DL) nghttp2_session_resume_data,$(DL) >> $@
+ @echo $(DL) nghttp2_session_send,$(DL) >> $@
+ @echo $(DL) nghttp2_session_want_read,$(DL) >> $@
+ @echo $(DL) nghttp2_session_want_write,$(DL) >> $@
+ @echo $(DL) nghttp2_strerror,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_goaway,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_request,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_rst_stream,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_settings,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_window_update,$(DL) >> $@
+ @echo $(DL) nghttp2_version$(DL) >> $@
+
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/NWGNUproxyht2 b/modules/http2/NWGNUproxyht2
new file mode 100644
index 00000000..7153d084
--- /dev/null
+++ b/modules/http2/NWGNUproxyht2
@@ -0,0 +1,287 @@
+#
+# This Makefile requires the environment var NGH2SRC
+# pointing to the base directory of nghttp2 source tree.
+#
+
+#
+# Declare the sub-directories to be built here
+#
+
+SUBDIRS = \
+ $(EOLIST)
+
+#
+# Get the 'head' of the build environment. This includes default targets and
+# paths to tools
+#
+
+include $(AP_WORK)/build/NWGNUhead.inc
+
+#
+# build this level's files
+#
+# Make sure all needed macros are defined
+#
+
+#
+# These directories will be at the beginning of the include list, followed by
+# INCDIRS
+#
+XINCDIRS += \
+ $(APR)/include \
+ $(APRUTIL)/include \
+ $(SRC)/include \
+ $(NGH2SRC)/lib/includes \
+ $(STDMOD)/proxy \
+ $(SERVER)/mpm/NetWare \
+ $(NWOS) \
+ $(EOLIST)
+
+#
+# These flags will come after CFLAGS
+#
+XCFLAGS += \
+ $(EOLIST)
+
+#
+# These defines will come after DEFINES
+#
+XDEFINES += \
+ $(EOLIST)
+
+#
+# These flags will be added to the link.opt file
+#
+XLFLAGS += \
+ -L$(OBJDIR) \
+ $(EOLIST)
+
+#
+# These values will be appended to the correct variables based on the value of
+# RELEASE
+#
+ifeq "$(RELEASE)" "debug"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "noopt"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+ifeq "$(RELEASE)" "release"
+XINCDIRS += \
+ $(EOLIST)
+
+XCFLAGS += \
+ $(EOLIST)
+
+XDEFINES += \
+ $(EOLIST)
+
+XLFLAGS += \
+ $(EOLIST)
+endif
+
+#
+# These are used by the link target if an NLM is being generated
+# This is used by the link 'name' directive to name the nlm. If left blank
+# TARGET_nlm (see below) will be used.
+#
+NLM_NAME = proxyht2
+
+#
+# This is used by the link '-desc ' directive.
+# If left blank, NLM_NAME will be used.
+#
+NLM_DESCRIPTION = Apache $(VERSION_STR) HTTP2 Proxy module
+#
+# This is used by the '-threadname' directive. If left blank,
+# NLM_NAME Thread will be used.
+#
+NLM_THREAD_NAME = $(NLM_NAME)
+
+#
+# If this is specified, it will override VERSION value in
+# $(AP_WORK)/build/NWGNUenvironment.inc
+#
+NLM_VERSION =
+
+#
+# If this is specified, it will override the default of 64K
+#
+NLM_STACK_SIZE = 65536
+
+#
+# If this is specified it will be used by the link '-entry' directive
+#
+NLM_ENTRY_SYM =
+
+#
+# If this is specified it will be used by the link '-exit' directive
+#
+NLM_EXIT_SYM =
+
+#
+# If this is specified it will be used by the link '-check' directive
+#
+NLM_CHECK_SYM =
+
+#
+# If this is specified it will be used by the link '-flags' directive
+#
+NLM_FLAGS =
+
+#
+# If this is specified it will be linked in with the XDCData option in the def
+# file instead of the default of $(NWOS)/apache.xdc. XDCData can be disabled
+# by setting APACHE_UNIPROC in the environment
+#
+XDCDATA =
+
+#
+# Declare all target files (you must add your files here)
+#
+
+#
+# If there is an NLM target, put it here
+#
+TARGET_nlm = \
+ $(OBJDIR)/$(NLM_NAME).nlm \
+ $(EOLIST)
+
+#
+# If there is an LIB target, put it here
+#
+TARGET_lib = \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the NLM target above.
+# Paths must all use the '/' character
+#
+FILES_nlm_objs = \
+ $(OBJDIR)/mod_proxy_http2.o \
+ $(OBJDIR)/h2_proxy_session.o \
+ $(EOLIST)
+
+#
+# These are the LIB files needed to create the NLM target above.
+# These will be added as a library command in the link.opt file.
+#
+FILES_nlm_libs = \
+ $(PRELUDE) \
+ $(EOLIST)
+
+#
+# These are the modules that the above NLM target depends on to load.
+# These will be added as a module command in the link.opt file.
+#
+FILES_nlm_modules = \
+ Libc \
+ Apache2 \
+ mod_proxy \
+ mod_http2 \
+ $(EOLIST)
+
+#
+# If the nlm has a msg file, put its path here
+#
+FILE_nlm_msg =
+
+#
+# If the nlm has a hlp file, put its path here
+#
+FILE_nlm_hlp =
+
+#
+# If this is specified, it will override $(NWOS)\copyright.txt.
+#
+FILE_nlm_copyright =
+
+#
+# Any additional imports go here
+#
+FILES_nlm_Ximports = \
+ @libc.imp \
+ @aprlib.imp \
+ @httpd.imp \
+ @$(OBJDIR)/mod_http2.imp \
+ ap_proxy_acquire_connection \
+ ap_proxy_canon_netloc \
+ ap_proxy_canonenc \
+ ap_proxy_connect_backend \
+ ap_proxy_connection_create \
+ ap_proxy_cookie_reverse_map \
+ ap_proxy_determine_connection \
+ ap_proxy_location_reverse_map \
+ ap_proxy_port_of_scheme \
+ ap_proxy_release_connection \
+ ap_proxy_ssl_connection_cleanup \
+ ap_sock_disable_nagle \
+ proxy_hook_canon_handler \
+ proxy_hook_scheme_handler \
+ proxy_module \
+ proxy_run_detach_backend \
+ $(EOLIST)
+
+#
+# Any symbols exported to here
+#
+FILES_nlm_exports = \
+ proxy_http2_module \
+ $(EOLIST)
+
+#
+# These are the OBJ files needed to create the LIB target above.
+# Paths must all use the '/' character
+#
+FILES_lib_objs :=
+#
+# implement targets and dependencies (leave this section alone)
+#
+
+libs :: $(OBJDIR) $(TARGET_lib)
+
+nlms :: libs $(TARGET_nlm)
+
+#
+# Updated this target to create necessary directories and copy files to the
+# correct place. (See $(AP_WORK)/build/NWGNUhead.inc for examples)
+#
+install :: nlms FORCE
+ $(call COPY,$(OBJDIR)/*.nlm, $(INSTALLBASE)/modules/)
+
+clean ::
+
+#
+# Any specialized rules here
+#
+
+#
+# Include the 'tail' makefile that has targets that depend on variables defined
+# in this makefile
+#
+
+include $(APBUILD)/NWGNUtail.inc
+
+
diff --git a/modules/http2/README.h2 b/modules/http2/README.h2
new file mode 100644
index 00000000..399086b7
--- /dev/null
+++ b/modules/http2/README.h2
@@ -0,0 +1,70 @@
+The http2 module adds support for the HTTP/2 protocol to the server.
+
+Specifically, it supports the protocols "h2" (HTTP2 over TLS) and "h2c"
+(HTTP2 over plain HTTP connections via Upgrade). Additionally it offers
+the "direct" mode for both encrypted and unencrypted connections.
+
+You may enable it for the whole server or specific virtual hosts only.
+
+
+BUILD
+
+If you have libnghttp2 (https://nghttp2.org) installed on your system, simply
+add
+
+ --enable-http2
+
+to your httpd ./configure invocation. Should libnghttp2 reside in an unusual
+location, add
+
+ --with-nghttp2=<path>
+
+to ./configure. <path> is expected to be the installation prefix, so there
+should be a <path>/lib/libnghttp2.*. If your system supports pkg-config,
+<path>/lib/pkgconfig/libnghttp2.pc will be inspected.
+
+If you want to link nghttp2 statically into the mod_http2 module, you may,
+similarly to mod_ssl, add
+
+ --enable-nghttp2-staticlib-deps
+
+For this, the lib directory should only contain the libnghttp2.a, not its
+shared cousins.
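+
+As an illustration (the path below is only an example, not a requirement),
+a build against a libnghttp2 installed under /opt/nghttp2 could look like:
+
+    ./configure --enable-http2 --with-nghttp2=/opt/nghttp2
+    make
+    make install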
+
+
+CONFIGURATION
+
+Whether mod_http2 is enabled for a site depends on the new "Protocols"
+directive. This directive lists all protocols enabled for a server or
+virtual host.
+
+If you do not specify "Protocols", all available protocols are enabled. For
+sites using TLS, the protocol supported by mod_http2 is "h2". For cleartext
+http:, the offered protocol is "h2c".
+
+The following is an example of a server that only supports http/1.1 in
+general and offers h2 for a specific virtual host.
+
+ ...
+ Protocols http/1.1
+ <virtualhost *:443>
+ Protocols h2 http/1.1
+ ...
+ </virtualhost>
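+
+The example above assumes that mod_http2 has already been loaded. For a
+shared build, a typical (illustrative) LoadModule line is:
+
+    LoadModule http2_module modules/mod_http2.so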
+
+Please see the documentation of mod_http2 for a complete list and explanation
+of all options.
+
+
+TLS CONFIGURATION
+
+If you want to use HTTP/2 with a browser, most modern browsers will support
+it without further configuration. However, browsers so far only support
+HTTP/2 over TLS and are especially picky about the certificate and
+encryption ciphers used.
+
+Server admins may look for up-to-date information about "modern" TLS
+compatibility under:
+
+ https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
new file mode 100644
index 00000000..a77ad808
--- /dev/null
+++ b/modules/http2/config2.m4
@@ -0,0 +1,225 @@
+dnl Licensed to the Apache Software Foundation (ASF) under one or more
+dnl contributor license agreements. See the NOTICE file distributed with
+dnl this work for additional information regarding copyright ownership.
+dnl The ASF licenses this file to You under the Apache License, Version 2.0
+dnl (the "License"); you may not use this file except in compliance with
+dnl the License. You may obtain a copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS,
+dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+dnl See the License for the specific language governing permissions and
+dnl limitations under the License.
+
+dnl # start of module specific part
+APACHE_MODPATH_INIT(http2)
+
+dnl # list of module object files
+http2_objs="dnl
+mod_http2.lo dnl
+h2_alt_svc.lo dnl
+h2_bucket_beam.lo dnl
+h2_bucket_eoc.lo dnl
+h2_bucket_eos.lo dnl
+h2_config.lo dnl
+h2_conn.lo dnl
+h2_conn_io.lo dnl
+h2_ctx.lo dnl
+h2_filter.lo dnl
+h2_from_h1.lo dnl
+h2_h2.lo dnl
+h2_mplx.lo dnl
+h2_ngn_shed.lo dnl
+h2_push.lo dnl
+h2_request.lo dnl
+h2_response.lo dnl
+h2_session.lo dnl
+h2_stream.lo dnl
+h2_switch.lo dnl
+h2_task.lo dnl
+h2_util.lo dnl
+h2_worker.lo dnl
+h2_workers.lo dnl
+"
+
+dnl
+dnl APACHE_CHECK_NGHTTP2
+dnl
+dnl Configure for nghttp2, giving preference to
+dnl "--with-nghttp2=<path>" if it was specified.
+dnl
+AC_DEFUN([APACHE_CHECK_NGHTTP2],[
+ AC_CACHE_CHECK([for nghttp2], [ac_cv_nghttp2], [
+ dnl initialise the variables we use
+ ac_cv_nghttp2=no
+ ap_nghttp2_found=""
+ ap_nghttp2_base=""
+ ap_nghttp2_libs=""
+
+ dnl Determine the nghttp2 base directory, if any
+ AC_MSG_CHECKING([for user-provided nghttp2 base directory])
+ AC_ARG_WITH(nghttp2, APACHE_HELP_STRING(--with-nghttp2=PATH, nghttp2 installation directory), [
+ dnl If --with-nghttp2 specifies a directory, we use that directory
+ if test "x$withval" != "xyes" -a "x$withval" != "x"; then
+ dnl This ensures $withval is actually a directory and that it is absolute
+ ap_nghttp2_base="`cd $withval ; pwd`"
+ fi
+ ])
+ if test "x$ap_nghttp2_base" = "x"; then
+ AC_MSG_RESULT(none)
+ else
+ AC_MSG_RESULT($ap_nghttp2_base)
+ fi
+
+ dnl Run header and version checks
+ saved_CPPFLAGS="$CPPFLAGS"
+ saved_LIBS="$LIBS"
+ saved_LDFLAGS="$LDFLAGS"
+
+ dnl Before doing anything else, load in pkg-config variables
+ if test -n "$PKGCONFIG"; then
+ saved_PKG_CONFIG_PATH="$PKG_CONFIG_PATH"
+ AC_MSG_CHECKING([for pkg-config along $PKG_CONFIG_PATH])
+ if test "x$ap_nghttp2_base" != "x" -a \
+ -f "${ap_nghttp2_base}/lib/pkgconfig/libnghttp2.pc"; then
+ dnl Ensure that the given path is used by pkg-config too, otherwise
+ dnl the system libnghttp2.pc might be picked up instead.
+ PKG_CONFIG_PATH="${ap_nghttp2_base}/lib/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}"
+ export PKG_CONFIG_PATH
+ fi
+ AC_ARG_ENABLE(nghttp2-staticlib-deps,APACHE_HELP_STRING(--enable-nghttp2-staticlib-deps,[link mod_http2 with dependencies of libnghttp2's static libraries (as indicated by "pkg-config --static"). Must be specified in addition to --enable-http2.]), [
+ if test "$enableval" = "yes"; then
+ PKGCONFIG_LIBOPTS="--static"
+ fi
+ ])
+ ap_nghttp2_libs="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-l --silence-errors libnghttp2`"
+ if test $? -eq 0; then
+ ap_nghttp2_found="yes"
+ pkglookup="`$PKGCONFIG --cflags-only-I libnghttp2`"
+ APR_ADDTO(CPPFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_CFLAGS, [$pkglookup])
+ pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-L libnghttp2`"
+ APR_ADDTO(LDFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_LDFLAGS, [$pkglookup])
+ pkglookup="`$PKGCONFIG $PKGCONFIG_LIBOPTS --libs-only-other libnghttp2`"
+ APR_ADDTO(LDFLAGS, [$pkglookup])
+ APR_ADDTO(MOD_LDFLAGS, [$pkglookup])
+ fi
+ PKG_CONFIG_PATH="$saved_PKG_CONFIG_PATH"
+ fi
+
+ dnl fall back to the user-supplied directory if not found via pkg-config
+ if test "x$ap_nghttp2_base" != "x" -a "x$ap_nghttp2_found" = "x"; then
+ APR_ADDTO(CPPFLAGS, [-I$ap_nghttp2_base/include])
+ APR_ADDTO(MOD_CFLAGS, [-I$ap_nghttp2_base/include])
+ APR_ADDTO(LDFLAGS, [-L$ap_nghttp2_base/lib])
+ APR_ADDTO(MOD_LDFLAGS, [-L$ap_nghttp2_base/lib])
+ if test "x$ap_platform_runtime_link_flag" != "x"; then
+ APR_ADDTO(LDFLAGS, [$ap_platform_runtime_link_flag$ap_nghttp2_base/lib])
+ APR_ADDTO(MOD_LDFLAGS, [$ap_platform_runtime_link_flag$ap_nghttp2_base/lib])
+ fi
+ fi
+
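+  dnl NGHTTP2_VERSION_NUM encodes a release as (major << 16) | (minor << 8) | patch,
+  dnl so the 0x010201 used below corresponds to the required minimum of 1.2.1.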
+ AC_MSG_CHECKING([for nghttp2 version >= 1.2.1])
+ AC_TRY_COMPILE([#include <nghttp2/nghttp2ver.h>],[
+#if !defined(NGHTTP2_VERSION_NUM)
+#error "Missing nghttp2 version"
+#endif
+#if NGHTTP2_VERSION_NUM < 0x010201
+#error "Unsupported nghttp2 version " NGHTTP2_VERSION_TEXT
+#endif],
+ [AC_MSG_RESULT(OK)
+ ac_cv_nghttp2=yes],
+ [AC_MSG_RESULT(FAILED)])
+
+ if test "x$ac_cv_nghttp2" = "xyes"; then
+ ap_nghttp2_libs="${ap_nghttp2_libs:--lnghttp2} `$apr_config --libs`"
+ APR_ADDTO(MOD_LDFLAGS, [$ap_nghttp2_libs])
+ APR_ADDTO(LIBS, [$ap_nghttp2_libs])
+
+ dnl Run library and function checks
+ liberrors=""
+ AC_CHECK_HEADERS([nghttp2/nghttp2.h])
+ AC_CHECK_FUNCS([nghttp2_session_server_new2], [], [liberrors="yes"])
+ if test "x$liberrors" != "x"; then
+ AC_MSG_WARN([nghttp2 library is unusable])
+ fi
+dnl # nghttp2 >= 1.3.0: access to stream weights
+ AC_CHECK_FUNCS([nghttp2_stream_get_weight], [], [liberrors="yes"])
+ if test "x$liberrors" != "x"; then
+ AC_MSG_WARN([nghttp2 version >= 1.3.0 is required])
+ fi
+dnl # nghttp2 >= 1.5.0: changing stream priorities
+ AC_CHECK_FUNCS([nghttp2_session_change_stream_priority],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_CHANGE_PRIO"])], [])
+ else
+ AC_MSG_WARN([nghttp2 version is too old])
+ fi
+
+ dnl restore
+ CPPFLAGS="$saved_CPPFLAGS"
+ LIBS="$saved_LIBS"
+ LDFLAGS="$saved_LDFLAGS"
+ ])
+ if test "x$ac_cv_nghttp2" = "xyes"; then
+ AC_DEFINE(HAVE_NGHTTP2, 1, [Define if nghttp2 is available])
+ fi
+])
+
+
+dnl # hook module into the Autoconf mechanism (--enable-http2)
+APACHE_MODULE(http2, [HTTP/2 protocol handling in addition to HTTP protocol
+handling. Implemented by mod_http2. This module requires a libnghttp2 installation.
+See --with-nghttp2 on how to manage non-standard locations. This module
+is usually linked shared and requires loading. ], $http2_objs, , most, [
+ APACHE_CHECK_OPENSSL
+ if test "$ac_cv_openssl" = "yes" ; then
+ APR_ADDTO(MOD_CPPFLAGS, ["-DH2_OPENSSL"])
+ fi
+
+ APACHE_CHECK_NGHTTP2
+ if test "$ac_cv_nghttp2" = "yes" ; then
+ if test "x$enable_http2" = "xshared"; then
+ # The only symbol which needs to be exported is the module
+ # structure, so ask libtool to hide everything else:
+ APR_ADDTO(MOD_HTTP2_LDADD, [-export-symbols-regex http2_module])
+ fi
+ else
+ enable_http2=no
+ fi
+])
+
+# Ensure that other modules can pick up mod_http2.h
+# icing: hold back for now until it is more stable
+#APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
+
+
+
+dnl # list of module object files
+proxy_http2_objs="dnl
+mod_proxy_http2.lo dnl
+h2_proxy_session.lo dnl
+h2_proxy_util.lo dnl
+"
+
+dnl # hook module into the Autoconf mechanism (--enable-proxy_http2)
+APACHE_MODULE(proxy_http2, [HTTP/2 proxy module. This module requires a libnghttp2 installation.
+See --with-nghttp2 on how to manage non-standard locations. Also requires --enable-proxy.], $proxy_http2_objs, , no, [
+ APACHE_CHECK_NGHTTP2
+ if test "$ac_cv_nghttp2" = "yes" ; then
+ if test "x$enable_http2" = "xshared"; then
+ # The only symbol which needs to be exported is the module
+ # structure, so ask libtool to hide everything else:
+ APR_ADDTO(MOD_PROXY_HTTP2_LDADD, [-export-symbols-regex proxy_http2_module])
+ fi
+ else
+ enable_proxy_http2=no
+ fi
+], proxy)
+
+
+dnl # end of module specific part
+APACHE_MODPATH_FINISH
+
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
new file mode 100644
index 00000000..9075b00a
--- /dev/null
+++ b/modules/http2/h2.h
@@ -0,0 +1,161 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2__
+#define __mod_h2__h2__
+
+/**
+ * The magic PRIamble of RFC 7540 that is always sent when starting
+ * a h2 communication.
+ */
+extern const char *H2_MAGIC_TOKEN;
+
+#define H2_ERR_NO_ERROR (0x00)
+#define H2_ERR_PROTOCOL_ERROR (0x01)
+#define H2_ERR_INTERNAL_ERROR (0x02)
+#define H2_ERR_FLOW_CONTROL_ERROR (0x03)
+#define H2_ERR_SETTINGS_TIMEOUT (0x04)
+#define H2_ERR_STREAM_CLOSED (0x05)
+#define H2_ERR_FRAME_SIZE_ERROR (0x06)
+#define H2_ERR_REFUSED_STREAM (0x07)
+#define H2_ERR_CANCEL (0x08)
+#define H2_ERR_COMPRESSION_ERROR (0x09)
+#define H2_ERR_CONNECT_ERROR (0x0a)
+#define H2_ERR_ENHANCE_YOUR_CALM (0x0b)
+#define H2_ERR_INADEQUATE_SECURITY (0x0c)
+#define H2_ERR_HTTP_1_1_REQUIRED (0x0d)
+
+#define H2_HEADER_METHOD ":method"
+#define H2_HEADER_METHOD_LEN 7
+#define H2_HEADER_SCHEME ":scheme"
+#define H2_HEADER_SCHEME_LEN 7
+#define H2_HEADER_AUTH ":authority"
+#define H2_HEADER_AUTH_LEN 10
+#define H2_HEADER_PATH ":path"
+#define H2_HEADER_PATH_LEN 5
+#define H2_CRLF "\r\n"
+
+/* Maximum number of padding bytes in a frame, rfc7540 */
+#define H2_MAX_PADLEN 256
+/* Initial default window size, RFC 7540 ch. 6.5.2 */
+#define H2_INITIAL_WINDOW_SIZE ((64*1024)-1)
+
+#define H2_HTTP_2XX(a) ((a) >= 200 && (a) < 300)
+
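+/* Client-initiated streams carry odd identifiers, server-initiated (pushed)
+ * streams carry even ones (RFC 7540, ch. 5.1.1), hence the low-bit test: */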
+#define H2_STREAM_CLIENT_INITIATED(id) (id&0x01)
+
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+
+#define H2MAX(x,y) ((x) > (y) ? (x) : (y))
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
+typedef enum {
+ H2_DEPENDANT_AFTER,
+ H2_DEPENDANT_INTERLEAVED,
+ H2_DEPENDANT_BEFORE,
+} h2_dependency;
+
+typedef struct h2_priority {
+ h2_dependency dependency;
+ int weight;
+} h2_priority;
+
+typedef enum {
+ H2_PUSH_NONE,
+ H2_PUSH_DEFAULT,
+ H2_PUSH_HEAD,
+ H2_PUSH_FAST_LOAD,
+} h2_push_policy;
+
+typedef enum {
+ H2_STREAM_ST_IDLE,
+ H2_STREAM_ST_OPEN,
+ H2_STREAM_ST_RESV_LOCAL,
+ H2_STREAM_ST_RESV_REMOTE,
+ H2_STREAM_ST_CLOSED_INPUT,
+ H2_STREAM_ST_CLOSED_OUTPUT,
+ H2_STREAM_ST_CLOSED,
+} h2_stream_state_t;
+
+typedef enum {
+ H2_SESSION_ST_INIT, /* send initial SETTINGS, etc. */
+ H2_SESSION_ST_DONE, /* finished, connection close */
+ H2_SESSION_ST_IDLE, /* nothing to write, expecting data inc */
+ H2_SESSION_ST_BUSY, /* read/write without stop */
+ H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */
+ H2_SESSION_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */
+ H2_SESSION_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */
+} h2_session_state;
+
+typedef struct h2_session_props {
+    apr_uint32_t accepted_max;     /* the highest remote stream id that was/will be handled */
+ apr_uint32_t completed_max; /* the highest remote stream completed */
+ apr_uint32_t emitted_count; /* the number of local streams sent */
+ apr_uint32_t emitted_max; /* the highest local stream id sent */
+ apr_uint32_t error; /* the last session error encountered */
+ unsigned int accepting : 1; /* if the session is accepting new streams */
+} h2_session_props;
+
+
+/* h2_request is the transformer of HTTP2 streams into HTTP/1.1 internal
+ * format that will be fed to various httpd input filters to finally
+ * become a request_rec to be handled by someone.
+ */
+typedef struct h2_request h2_request;
+
+struct h2_request {
+ int id; /* stream id */
+ int initiated_on; /* initiating stream id (PUSH) or 0 */
+
+ const char *method; /* pseudo header values, see ch. 8.1.2.3 */
+ const char *scheme;
+ const char *authority;
+ const char *path;
+
+ apr_table_t *headers;
+ apr_table_t *trailers;
+
+ apr_time_t request_time;
+ apr_off_t content_length;
+
+    unsigned int chunked : 1;   /* iff request body needs to be forwarded as chunked */
+ unsigned int eoh : 1; /* iff end-of-headers has been seen and request is complete */
+ unsigned int body : 1; /* iff this request has a body */
+ unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+ unsigned int push_policy; /* which push policy to use for this request */
+};
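+
+/* Illustrative example (not part of the original header): for a client
+ * request of "GET /index.html" against https://example.org, the pseudo
+ * header values of RFC 7540 ch. 8.1.2.3 would be
+ *     method    = "GET"
+ *     scheme    = "https"
+ *     authority = "example.org"
+ *     path      = "/index.html"
+ * while the remaining request headers go into the headers table.
+ */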
+
+typedef struct h2_response h2_response;
+
+struct h2_response {
+ int stream_id;
+ int rst_error;
+ int http_status;
+ apr_off_t content_length;
+ apr_table_t *headers;
+ apr_table_t *trailers;
+ const char *sos_filter;
+};
+
+typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
+
+typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx);
+
+/* Note key to attach connection task id to conn_rec/request_rec instances */
+
+#define H2_TASK_ID_NOTE "http2-task-id"
+
+
+#endif /* defined(__mod_h2__h2__) */
diff --git a/modules/http2/h2_alt_svc.c b/modules/http2/h2_alt_svc.c
new file mode 100644
index 00000000..24a8b1f4
--- /dev/null
+++ b/modules/http2/h2_alt_svc.c
@@ -0,0 +1,130 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_strings.h>
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_alt_svc.h"
+#include "h2_ctx.h"
+#include "h2_config.h"
+#include "h2_h2.h"
+#include "h2_util.h"
+
+static int h2_alt_svc_handler(request_rec *r);
+
+void h2_alt_svc_register_hooks(void)
+{
+ ap_hook_post_read_request(h2_alt_svc_handler, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+/**
+ * Parse an Alt-Svc specifier as described in "HTTP Alternative Services"
+ * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04)
+ * with the following changes:
+ * - do not percent encode token values
+ * - do not use quotation marks
+ */
+h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool)
+{
+ const char *sep = ap_strchr_c(s, '=');
+ if (sep) {
+ const char *alpn = apr_pstrmemdup(pool, s, sep - s);
+ const char *host = NULL;
+ int port = 0;
+ s = sep + 1;
+ sep = ap_strchr_c(s, ':'); /* mandatory : */
+ if (sep) {
+ if (sep != s) { /* optional host */
+ host = apr_pstrmemdup(pool, s, sep - s);
+ }
+ s = sep + 1;
+ if (*s) { /* must be a port number */
+ port = (int)apr_atoi64(s);
+ if (port > 0 && port < (0x1 << 16)) {
+ h2_alt_svc *as = apr_pcalloc(pool, sizeof(*as));
+ as->alpn = alpn;
+ as->host = host;
+ as->port = port;
+ return as;
+ }
+ }
+ }
+ }
+ return NULL;
+}
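+
+/* Usage sketch (illustrative only): a configured specifier such as
+ * "h2=alt.example.org:443" parses into alpn="h2", host="alt.example.org",
+ * port=443, while "h2=:8443" leaves host NULL and only names a port:
+ *
+ *     h2_alt_svc *as = h2_alt_svc_parse("h2=alt.example.org:443", r->pool);
+ *     if (as) {
+ *         ... use as->alpn, as->host, as->port ...
+ *     }
+ */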
+
+#define h2_alt_svc_IDX(list, i) ((h2_alt_svc**)(list)->elts)[i]
+
+static int h2_alt_svc_handler(request_rec *r)
+{
+ const h2_config *cfg;
+ int i;
+
+ if (r->connection->keepalives > 0) {
+ /* Only announce Alt-Svc on the first response */
+ return DECLINED;
+ }
+
+ if (h2_ctx_rget(r)) {
+ return DECLINED;
+ }
+
+ cfg = h2_config_rget(r);
+ if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) {
+ const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used");
+ if (!alt_svc_used) {
+            /* We have alt-svcs defined and the client is not already using
+             * one; announce the services that were configured and match.
+             * Whether this connection is secure determines if we may offer
+             * alternative hosts or only other ports on the same host.
+ */
+ const char *alt_svc = "";
+ const char *svc_ma = "";
+ int secure = h2_h2_is_tls(r->connection);
+ int ma = h2_config_geti(cfg, H2_CONF_ALT_SVC_MAX_AGE);
+ if (ma >= 0) {
+ svc_ma = apr_psprintf(r->pool, "; ma=%d", ma);
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03043)
+ "h2_alt_svc: announce %s for %s:%d",
+ (secure? "secure" : "insecure"),
+ r->hostname, (int)r->server->port);
+ for (i = 0; i < cfg->alt_svcs->nelts; ++i) {
+ h2_alt_svc *as = h2_alt_svc_IDX(cfg->alt_svcs, i);
+ const char *ahost = as->host;
+ if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) {
+ ahost = NULL;
+ }
+ if (secure || !ahost) {
+ alt_svc = apr_psprintf(r->pool, "%s%s%s=\"%s:%d\"%s",
+ alt_svc,
+ (*alt_svc? ", " : ""), as->alpn,
+ ahost? ahost : "", as->port,
+ svc_ma);
+ }
+ }
+ if (*alt_svc) {
+ apr_table_setn(r->headers_out, "Alt-Svc", alt_svc);
+ }
+ }
+ }
+
+ return DECLINED;
+}
diff --git a/modules/http2/h2_alt_svc.h b/modules/http2/h2_alt_svc.h
new file mode 100644
index 00000000..51f89d00
--- /dev/null
+++ b/modules/http2/h2_alt_svc.h
@@ -0,0 +1,39 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_alt_svc__
+#define __mod_h2__h2_alt_svc__
+
+typedef struct h2_alt_svc h2_alt_svc;
+
+struct h2_alt_svc {
+ const char *alpn;
+ const char *host;
+ int port;
+};
+
+void h2_alt_svc_register_hooks(void);
+
+/**
+ * Parse an Alt-Svc specifier as described in "HTTP Alternative Services"
+ * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04)
+ * with the following changes:
+ * - do not percent encode token values
+ * - do not use quotation marks
+ */
+h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool);
+
+
+#endif /* defined(__mod_h2__h2_alt_svc__) */
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
new file mode 100644
index 00000000..cf2cb84d
--- /dev/null
+++ b/modules/http2/h2_bucket_beam.c
@@ -0,0 +1,1015 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+#include <apr_buckets.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <httpd.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_util.h"
+#include "h2_bucket_beam.h"
+
+static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy);
+
+#define H2_BPROXY_NEXT(e) APR_RING_NEXT((e), link)
+#define H2_BPROXY_PREV(e) APR_RING_PREV((e), link)
+#define H2_BPROXY_REMOVE(e) APR_RING_REMOVE((e), link)
+
+#define H2_BPROXY_LIST_INIT(b) APR_RING_INIT(&(b)->list, h2_beam_proxy, link);
+#define H2_BPROXY_LIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, h2_beam_proxy, link)
+#define H2_BPROXY_LIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, h2_beam_proxy, link)
+#define H2_BPROXY_LIST_FIRST(b) APR_RING_FIRST(&(b)->list)
+#define H2_BPROXY_LIST_LAST(b) APR_RING_LAST(&(b)->list)
+#define H2_PROXY_BLIST_INSERT_HEAD(b, e) do { \
+ h2_beam_proxy *ap__b = (e); \
+ APR_RING_INSERT_HEAD(&(b)->list, ap__b, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_INSERT_TAIL(b, e) do { \
+ h2_beam_proxy *ap__b = (e); \
+ APR_RING_INSERT_TAIL(&(b)->list, ap__b, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_CONCAT(a, b) do { \
+ APR_RING_CONCAT(&(a)->list, &(b)->list, h2_beam_proxy, link); \
+ } while (0)
+#define H2_BPROXY_LIST_PREPEND(a, b) do { \
+ APR_RING_PREPEND(&(a)->list, &(b)->list, h2_beam_proxy, link); \
+ } while (0)
+
+
+/*******************************************************************************
+ * beam bucket with reference to beam and bucket it represents
+ ******************************************************************************/
+
+const apr_bucket_type_t h2_bucket_type_beam;
+
+#define H2_BUCKET_IS_BEAM(e) (e->type == &h2_bucket_type_beam)
+
+struct h2_beam_proxy {
+ apr_bucket_refcount refcount;
+ APR_RING_ENTRY(h2_beam_proxy) link;
+ h2_bucket_beam *beam;
+ apr_bucket *bred;
+ apr_size_t n;
+};
+
+static const char Dummy = '\0';
+
+static apr_status_t beam_bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ h2_beam_proxy *d = b->data;
+ if (d->bred) {
+ const char *data;
+ apr_status_t status = apr_bucket_read(d->bred, &data, len, block);
+ if (status == APR_SUCCESS) {
+ *str = data + b->start;
+ *len = b->length;
+ }
+ return status;
+ }
+ *str = &Dummy;
+ *len = 0;
+ return APR_ECONNRESET;
+}
+
+static void beam_bucket_destroy(void *data)
+{
+ h2_beam_proxy *d = data;
+
+ if (apr_bucket_shared_destroy(d)) {
+ /* When the beam gets destroyed before this bucket, it will
+ * NULLify its reference here. This is not protected by a mutex,
+ * so it will not help with race conditions.
+         * But it lets us shut down a memory pool with circular beam
+ * references. */
+ if (d->beam) {
+ h2_beam_emitted(d->beam, d);
+ }
+ apr_bucket_free(d);
+ }
+}
+
+static apr_bucket * h2_beam_bucket_make(apr_bucket *b,
+ h2_bucket_beam *beam,
+ apr_bucket *bred, apr_size_t n)
+{
+ h2_beam_proxy *d;
+
+ d = apr_bucket_alloc(sizeof(*d), b->list);
+ H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d);
+ d->beam = beam;
+ d->bred = bred;
+ d->n = n;
+
+ b = apr_bucket_shared_make(b, d, 0, bred? bred->length : 0);
+ b->type = &h2_bucket_type_beam;
+
+ return b;
+}
+
+static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
+ apr_bucket *bred,
+ apr_bucket_alloc_t *list,
+ apr_size_t n)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ return h2_beam_bucket_make(b, beam, bred, n);
+}
+
+/*static apr_status_t beam_bucket_setaside(apr_bucket *b, apr_pool_t *pool)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_beam_proxy *d = b->data;
+ if (d->bred) {
+ const char *data;
+ apr_size_t len;
+
+ status = apr_bucket_read(d->bred, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ b = apr_bucket_heap_make(b, (char *)data + b->start, b->length, NULL);
+ if (b == NULL) {
+ return APR_ENOMEM;
+ }
+ }
+ }
+ return status;
+}*/
+
+const apr_bucket_type_t h2_bucket_type_beam = {
+ "BEAM", 5, APR_BUCKET_DATA,
+ beam_bucket_destroy,
+ beam_bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_shared_split,
+ apr_bucket_shared_copy
+};
+
+/*******************************************************************************
+ * h2_blist, a brigade without allocations
+ ******************************************************************************/
+
+apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ h2_blist *bl)
+{
+ apr_size_t off = 0;
+ const char *sp = "";
+ apr_bucket *b;
+
+ if (bl) {
+ memset(buffer, 0, bmax--);
+ off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
+ for (b = H2_BLIST_FIRST(bl);
+ bmax && (b != H2_BLIST_SENTINEL(bl));
+ b = APR_BUCKET_NEXT(b)) {
+
+ off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
+ sp = " ";
+ }
+ off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
+ }
+ return off;
+}
+
+
+
+/*******************************************************************************
+ * bucket beam that can transport buckets across threads
+ ******************************************************************************/
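+
+/* Life-cycle sketch (illustrative, limited to functions defined in this
+ * file): the sending ("red") side creates the beam on its own pool, may
+ * register a mutex/condition for cross-thread use, and closes it when no
+ * more buckets will follow:
+ *
+ *     h2_bucket_beam *beam;
+ *     h2_beam_create(&beam, red_pool, stream_id, "input", 32 * 1024);
+ *     h2_beam_mutex_set(beam, my_enter_fn, cond, my_ctx);
+ *     ...
+ *     h2_beam_close(beam);
+ *     h2_beam_destroy(beam);    (or let the red_pool cleanup handle it)
+ */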
+
+static apr_status_t enter_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
+{
+ h2_beam_mutex_enter *enter = beam->m_enter;
+ if (enter) {
+ void *ctx = beam->m_ctx;
+ if (ctx) {
+ return enter(ctx, pbl);
+ }
+ }
+ pbl->mutex = NULL;
+ pbl->leave = NULL;
+ return APR_SUCCESS;
+}
+
+static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
+{
+ if (pbl->leave) {
+ pbl->leave(pbl->leave_ctx, pbl->mutex);
+ }
+}
+
+static apr_off_t calc_buffered(h2_bucket_beam *beam)
+{
+ apr_off_t len = 0;
+ apr_bucket *b;
+ for (b = H2_BLIST_FIRST(&beam->red);
+ b != H2_BLIST_SENTINEL(&beam->red);
+ b = APR_BUCKET_NEXT(b)) {
+ if (b->length == ((apr_size_t)-1)) {
+ /* do not count */
+ }
+ else if (APR_BUCKET_IS_FILE(b)) {
+ /* if unread, has no real mem footprint. how to test? */
+ }
+ else {
+ len += b->length;
+ }
+ }
+ return len;
+}
+
+static void r_purge_reds(h2_bucket_beam *beam)
+{
+ apr_bucket *bred;
+ /* delete all red buckets in purge brigade, needs to be called
+ * from red thread only */
+ while (!H2_BLIST_EMPTY(&beam->purge)) {
+ bred = H2_BLIST_FIRST(&beam->purge);
+ apr_bucket_delete(bred);
+ }
+}
+
+static apr_size_t calc_space_left(h2_bucket_beam *beam)
+{
+ if (beam->max_buf_size > 0) {
+ apr_off_t len = calc_buffered(beam);
+ return (beam->max_buf_size > len? (beam->max_buf_size - len) : 0);
+ }
+ return APR_SIZE_MAX;
+}
+
+static apr_status_t wait_cond(h2_bucket_beam *beam, apr_thread_mutex_t *lock)
+{
+ if (beam->timeout > 0) {
+ return apr_thread_cond_timedwait(beam->m_cond, lock, beam->timeout);
+ }
+ else {
+ return apr_thread_cond_wait(beam->m_cond, lock);
+ }
+}
+
+static apr_status_t r_wait_space(h2_bucket_beam *beam, apr_read_type_e block,
+ h2_beam_lock *pbl, apr_off_t *premain)
+{
+ *premain = calc_space_left(beam);
+ while (!beam->aborted && *premain <= 0
+ && (block == APR_BLOCK_READ) && pbl->mutex) {
+ apr_status_t status = wait_cond(beam, pbl->mutex);
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ return status;
+ }
+ r_purge_reds(beam);
+ *premain = calc_space_left(beam);
+ }
+ return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
+}
+
+static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
+{
+ h2_beam_lock bl;
+ apr_bucket *b, *next;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ /* even when beam buckets are split, only the one where
+ * refcount drops to 0 will call us */
+ H2_BPROXY_REMOVE(proxy);
+ /* invoked from green thread, the last beam bucket for the red
+ * bucket bred is about to be destroyed.
+ * remove it from the hold, where it should be now */
+ if (proxy->bred) {
+ for (b = H2_BLIST_FIRST(&beam->hold);
+ b != H2_BLIST_SENTINEL(&beam->hold);
+ b = APR_BUCKET_NEXT(b)) {
+ if (b == proxy->bred) {
+ break;
+ }
+ }
+ if (b != H2_BLIST_SENTINEL(&beam->hold)) {
+ /* bucket is in hold as it should be, mark this one
+ * and all before it for purging. We might have placed meta
+ * buckets without a green proxy into the hold before it
+ * and schedule them for purging now */
+ for (b = H2_BLIST_FIRST(&beam->hold);
+ b != H2_BLIST_SENTINEL(&beam->hold);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (b == proxy->bred) {
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->purge, b);
+ break;
+ }
+ else if (APR_BUCKET_IS_METADATA(b)) {
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->purge, b);
+ }
+ else {
+ /* another data bucket before this one in hold. this
+ * is normal since DATA buckets need not be destroyed
+ * in order */
+ }
+ }
+
+ proxy->bred = NULL;
+ }
+ else {
+ /* it should be there unless we screwed up */
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->red_pool,
+ APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
+ "in hold, n=%d", beam->id, beam->tag,
+ (int)proxy->n);
+ AP_DEBUG_ASSERT(!proxy->bred);
+ }
+ }
+ /* notify anyone waiting on space to become available */
+ if (!bl.mutex) {
+ r_purge_reds(beam);
+ }
+ else if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ leave_yellow(beam, &bl);
+ }
+}
+
+static void report_consumption(h2_bucket_beam *beam, int force)
+{
+ if (force || beam->received_bytes != beam->reported_consumed_bytes) {
+ if (beam->consumed_fn) {
+ beam->consumed_fn(beam->consumed_ctx, beam, beam->received_bytes
+ - beam->reported_consumed_bytes);
+ }
+ beam->reported_consumed_bytes = beam->received_bytes;
+ }
+}
+
+static void report_production(h2_bucket_beam *beam, int force)
+{
+ if (force || beam->sent_bytes != beam->reported_produced_bytes) {
+ if (beam->produced_fn) {
+ beam->produced_fn(beam->produced_ctx, beam, beam->sent_bytes
+ - beam->reported_produced_bytes);
+ }
+ beam->reported_produced_bytes = beam->sent_bytes;
+ }
+}
+
+static void h2_blist_cleanup(h2_blist *bl)
+{
+ apr_bucket *e;
+
+ while (!H2_BLIST_EMPTY(bl)) {
+ e = H2_BLIST_FIRST(bl);
+ apr_bucket_delete(e);
+ }
+}
+
+static apr_status_t beam_close(h2_bucket_beam *beam)
+{
+ if (!beam->closed) {
+ beam->closed = 1;
+ if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t beam_cleanup(void *data)
+{
+ h2_bucket_beam *beam = data;
+
+ beam_close(beam);
+ r_purge_reds(beam);
+ h2_blist_cleanup(&beam->red);
+ report_consumption(beam, 0);
+ while (!H2_BPROXY_LIST_EMPTY(&beam->proxies)) {
+ h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies);
+ H2_BPROXY_REMOVE(proxy);
+ proxy->beam = NULL;
+ proxy->bred = NULL;
+ }
+ h2_blist_cleanup(&beam->purge);
+ h2_blist_cleanup(&beam->hold);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam)
+{
+ apr_pool_cleanup_kill(beam->red_pool, beam, beam_cleanup);
+ return beam_cleanup(beam);
+}
+
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *red_pool,
+ int id, const char *tag,
+ apr_size_t max_buf_size)
+{
+ h2_bucket_beam *beam;
+ apr_status_t status = APR_SUCCESS;
+
+ beam = apr_pcalloc(red_pool, sizeof(*beam));
+ if (!beam) {
+ return APR_ENOMEM;
+ }
+
+ beam->id = id;
+ beam->tag = tag;
+ H2_BLIST_INIT(&beam->red);
+ H2_BLIST_INIT(&beam->hold);
+ H2_BLIST_INIT(&beam->purge);
+ H2_BPROXY_LIST_INIT(&beam->proxies);
+ beam->red_pool = red_pool;
+ beam->max_buf_size = max_buf_size;
+
+ apr_pool_pre_cleanup_register(red_pool, beam, beam_cleanup);
+ *pbeam = beam;
+
+ return status;
+}
+
+void h2_beam_buffer_size_set(h2_bucket_beam *beam, apr_size_t buffer_size)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->max_buf_size = buffer_size;
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+ apr_size_t buffer_size = 0;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ buffer_size = beam->max_buf_size;
+ leave_yellow(beam, &bl);
+ }
+ return buffer_size;
+}
+
+void h2_beam_mutex_set(h2_bucket_beam *beam,
+ h2_beam_mutex_enter m_enter,
+ apr_thread_cond_t *cond,
+ void *m_ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->m_enter = m_enter;
+ beam->m_ctx = m_ctx;
+ beam->m_cond = cond;
+ leave_yellow(beam, &bl);
+ }
+}
+
+void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->timeout = timeout;
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+ apr_interval_time_t timeout = 0;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ timeout = beam->timeout;
+ leave_yellow(beam, &bl);
+ }
+ return timeout;
+}
+
+void h2_beam_abort(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_reds(beam);
+ h2_blist_cleanup(&beam->red);
+ beam->aborted = 1;
+ report_consumption(beam, 0);
+ if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ leave_yellow(beam, &bl);
+ }
+}
+
+apr_status_t h2_beam_close(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_reds(beam);
+ beam_close(beam);
+ report_consumption(beam, 0);
+ leave_yellow(beam, &bl);
+ }
+ return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
+}
+
+apr_status_t h2_beam_shutdown(h2_bucket_beam *beam, apr_read_type_e block,
+ int clear_buffers)
+{
+ apr_status_t status;
+ h2_beam_lock bl;
+
+ if ((status = enter_yellow(beam, &bl)) == APR_SUCCESS) {
+ if (clear_buffers) {
+ r_purge_reds(beam);
+ h2_blist_cleanup(&beam->red);
+ }
+ beam_close(beam);
+
+ while (status == APR_SUCCESS
+ && (!H2_BPROXY_LIST_EMPTY(&beam->proxies)
+ || (beam->green && !APR_BRIGADE_EMPTY(beam->green)))) {
+ if (block == APR_NONBLOCK_READ || !bl.mutex) {
+ status = APR_EAGAIN;
+ break;
+ }
+ if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ status = wait_cond(beam, bl.mutex);
+ }
+ leave_yellow(beam, &bl);
+ }
+ return status;
+}
+
+static apr_status_t append_bucket(h2_bucket_beam *beam,
+ apr_bucket *bred,
+ apr_read_type_e block,
+ apr_pool_t *pool,
+ h2_beam_lock *pbl)
+{
+ const char *data;
+ apr_size_t len;
+ apr_off_t space_left = 0;
+ apr_status_t status;
+
+ if (APR_BUCKET_IS_METADATA(bred)) {
+ if (APR_BUCKET_IS_EOS(bred)) {
+ beam->closed = 1;
+ }
+ APR_BUCKET_REMOVE(bred);
+ H2_BLIST_INSERT_TAIL(&beam->red, bred);
+ return APR_SUCCESS;
+ }
+ else if (APR_BUCKET_IS_FILE(bred)) {
+ /* file bucket lengths do not really count */
+ }
+ else {
+ space_left = calc_space_left(beam);
+ if (space_left > 0 && bred->length == ((apr_size_t)-1)) {
+ const char *data;
+ status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (space_left < bred->length) {
+ status = r_wait_space(beam, block, pbl, &space_left);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ if (space_left <= 0) {
+ return APR_EAGAIN;
+ }
+ }
+ /* space available, maybe need bucket split */
+ }
+
+
+    /* The fundamental problem is that reading a red bucket from
+     * a green thread is a total NO GO, because the bucket might use
+     * its pool/bucket_alloc from a foreign thread and that would
+     * corrupt memory. */
+ status = APR_ENOTIMPL;
+ if (beam->closed && bred->length > 0) {
+ status = APR_EOF;
+ }
+ else if (APR_BUCKET_IS_TRANSIENT(bred)) {
+ /* this takes care of transient buckets and converts them
+ * into heap ones. Other bucket types might or might not be
+ * affected by this. */
+ status = apr_bucket_setaside(bred, pool);
+ }
+ else if (APR_BUCKET_IS_HEAP(bred)) {
+ /* For heap buckets read from a green thread is fine. The
+ * data will be there and live until the bucket itself is
+ * destroyed. */
+ status = APR_SUCCESS;
+ }
+ else if (APR_BUCKET_IS_POOL(bred)) {
+ /* pool buckets are bastards that register at pool cleanup
+ * to morph themselves into heap buckets. That may happen anytime,
+ * even after the bucket data pointer has been read. So at
+ * any time inside the green thread, the pool bucket memory
+ * may disappear. yikes. */
+ status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ apr_bucket_heap_make(bred, data, len, NULL);
+ }
+ }
+ else if (APR_BUCKET_IS_FILE(bred)) {
+ /* For file buckets the problem is their internal readpool that
+ * is used on the first read to allocate buffer/mmap.
+ * Since setting aside a file bucket will de-register the
+ * file cleanup function from the previous pool, we need to
+ * call that from a red thread.
+ * Additionally, we allow callbacks to prevent beaming file
+ * handles across. The use case for this is to limit the number
+ * of open file handles and rather use a less efficient beam
+ * transport. */
+ apr_file_t *fd = ((apr_bucket_file *)bred->data)->fd;
+ int can_beam = 1;
+ if (beam->last_beamed != fd && beam->can_beam_fn) {
+ can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
+ }
+ if (can_beam) {
+ beam->last_beamed = fd;
+ status = apr_bucket_setaside(bred, pool);
+ }
+ /* else: enter ENOTIMPL case below */
+ }
+
+ if (status == APR_ENOTIMPL) {
+ /* we have no knowledge about the internals of this bucket,
+ * but hope that after read, its data stays immutable for the
+ * lifetime of the bucket. (see pool bucket handling above for
+ * a counter example).
+ * We do the read while in a red thread, so that the bucket may
+ * use pools/allocators safely. */
+ if (space_left < APR_BUCKET_BUFF_SIZE) {
+ space_left = APR_BUCKET_BUFF_SIZE;
+ }
+ if (space_left < bred->length) {
+ apr_bucket_split(bred, space_left);
+ }
+ status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ status = apr_bucket_setaside(bred, pool);
+ }
+ }
+
+ if (status != APR_SUCCESS && status != APR_ENOTIMPL) {
+ return status;
+ }
+
+ APR_BUCKET_REMOVE(bred);
+ H2_BLIST_INSERT_TAIL(&beam->red, bred);
+ beam->sent_bytes += bred->length;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_beam_send(h2_bucket_beam *beam,
+ apr_bucket_brigade *red_brigade,
+ apr_read_type_e block)
+{
+ apr_bucket *bred;
+ apr_status_t status = APR_SUCCESS;
+ h2_beam_lock bl;
+
+ /* Called from the red thread to add buckets to the beam */
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_reds(beam);
+
+ if (beam->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else if (red_brigade) {
+ int force_report = !APR_BRIGADE_EMPTY(red_brigade);
+ while (!APR_BRIGADE_EMPTY(red_brigade)
+ && status == APR_SUCCESS) {
+ bred = APR_BRIGADE_FIRST(red_brigade);
+ status = append_bucket(beam, bred, block, beam->red_pool, &bl);
+ }
+ report_production(beam, force_report);
+ if (beam->m_cond) {
+ apr_thread_cond_broadcast(beam->m_cond);
+ }
+ }
+ report_consumption(beam, 0);
+ leave_yellow(beam, &bl);
+ }
+ return status;
+}
+
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_beam_lock bl;
+ apr_bucket *bred, *bgreen, *ng;
+ int transferred = 0;
+ apr_status_t status = APR_SUCCESS;
+ apr_off_t remain = readbytes;
+
+ /* Called from the green thread to take buckets from the beam */
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+transfer:
+ if (beam->aborted) {
+ if (beam->green && !APR_BRIGADE_EMPTY(beam->green)) {
+ apr_brigade_cleanup(beam->green);
+ }
+ status = APR_ECONNABORTED;
+ goto leave;
+ }
+
+ /* transfer enough buckets from our green brigade, if we have one */
+ while (beam->green
+ && !APR_BRIGADE_EMPTY(beam->green)
+ && (readbytes <= 0 || remain >= 0)) {
+ bgreen = APR_BRIGADE_FIRST(beam->green);
+ if (readbytes > 0 && bgreen->length > 0 && remain <= 0) {
+ break;
+ }
+ APR_BUCKET_REMOVE(bgreen);
+ APR_BRIGADE_INSERT_TAIL(bb, bgreen);
+ remain -= bgreen->length;
+ ++transferred;
+ }
+
+ /* transfer from our red brigade, transforming red buckets to
+ * green ones until we have enough */
+ while (!H2_BLIST_EMPTY(&beam->red) && (readbytes <= 0 || remain >= 0)) {
+ bred = H2_BLIST_FIRST(&beam->red);
+ bgreen = NULL;
+
+ if (readbytes > 0 && bred->length > 0 && remain <= 0) {
+ break;
+ }
+
+ if (APR_BUCKET_IS_METADATA(bred)) {
+ if (APR_BUCKET_IS_EOS(bred)) {
+ bgreen = apr_bucket_eos_create(bb->bucket_alloc);
+ beam->close_sent = 1;
+ }
+ else if (APR_BUCKET_IS_FLUSH(bred)) {
+ bgreen = apr_bucket_flush_create(bb->bucket_alloc);
+ }
+ else {
+ /* put red into hold, no green sent out */
+ }
+ }
+ else if (APR_BUCKET_IS_FILE(bred)) {
+ /* This is set aside into the target brigade pool so that
+ * any read operation messes with that pool and not
+ * the red one. */
+ apr_bucket_file *f = (apr_bucket_file *)bred->data;
+ apr_file_t *fd = f->fd;
+ int setaside = (f->readpool != bb->p);
+
+ if (setaside) {
+ status = apr_file_setaside(&fd, fd, bb->p);
+ if (status != APR_SUCCESS) {
+ goto leave;
+ }
+ ++beam->files_beamed;
+ }
+ ng = apr_brigade_insert_file(bb, fd, bred->start, bred->length,
+ bb->p);
+#if APR_HAS_MMAP
+            /* disable mmap handling as this leads to segfaults when
+             * the underlying file is changed while a memory pointer has
+             * been handed out. See also PR 59348 */
+ apr_bucket_file_enable_mmap(ng, 0);
+#endif
+ remain -= bred->length;
+ ++transferred;
+ APR_BUCKET_REMOVE(bred);
+ H2_BLIST_INSERT_TAIL(&beam->hold, bred);
+ ++transferred;
+ continue;
+ }
+ else {
+ /* create a "green" standin bucket. we took care about the
+ * underlying red bucket and its data when we placed it into
+ * the red brigade.
+ * the beam bucket will notify us on destruction that bred is
+ * no longer needed. */
+ bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc,
+ beam->buckets_sent++);
+ }
+
+ /* Place the red bucket into our hold, to be destroyed when no
+ * green bucket references it any more. */
+ APR_BUCKET_REMOVE(bred);
+ H2_BLIST_INSERT_TAIL(&beam->hold, bred);
+ beam->received_bytes += bred->length;
+ if (bgreen) {
+ APR_BRIGADE_INSERT_TAIL(bb, bgreen);
+ remain -= bgreen->length;
+ ++transferred;
+ }
+ }
+
+ if (readbytes > 0 && remain < 0) {
+ /* too much, put some back */
+ remain = readbytes;
+ for (bgreen = APR_BRIGADE_FIRST(bb);
+ bgreen != APR_BRIGADE_SENTINEL(bb);
+ bgreen = APR_BUCKET_NEXT(bgreen)) {
+ remain -= bgreen->length;
+ if (remain < 0) {
+ apr_bucket_split(bgreen, bgreen->length+remain);
+ beam->green = apr_brigade_split_ex(bb,
+ APR_BUCKET_NEXT(bgreen),
+ beam->green);
+ break;
+ }
+ }
+ }
+
+ if (beam->closed
+ && (!beam->green || APR_BRIGADE_EMPTY(beam->green))
+ && H2_BLIST_EMPTY(&beam->red)) {
+ /* beam is closed and we have nothing more to receive */
+ if (!beam->close_sent) {
+ apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ beam->close_sent = 1;
+ ++transferred;
+ status = APR_SUCCESS;
+ }
+ }
+
+ if (transferred) {
+ status = APR_SUCCESS;
+ }
+ else if (beam->closed) {
+ status = APR_EOF;
+ }
+ else if (block == APR_BLOCK_READ && bl.mutex && beam->m_cond) {
+ status = wait_cond(beam, bl.mutex);
+ if (status != APR_SUCCESS) {
+ goto leave;
+ }
+ goto transfer;
+ }
+ else {
+ status = APR_EAGAIN;
+ }
+leave:
+ leave_yellow(beam, &bl);
+ }
+ return status;
+}
+
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+ h2_beam_io_callback *cb, void *ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->consumed_fn = cb;
+ beam->consumed_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+void h2_beam_on_produced(h2_bucket_beam *beam,
+ h2_beam_io_callback *cb, void *ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->produced_fn = cb;
+ beam->produced_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+void h2_beam_on_file_beam(h2_bucket_beam *beam,
+ h2_beam_can_beam_callback *cb, void *ctx)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ beam->can_beam_fn = cb;
+ beam->can_beam_ctx = ctx;
+ leave_yellow(beam, &bl);
+ }
+}
+
+
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ apr_off_t l = 0;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ for (b = H2_BLIST_FIRST(&beam->red);
+ b != H2_BLIST_SENTINEL(&beam->red);
+ b = APR_BUCKET_NEXT(b)) {
+ /* should all have determinate length */
+ l += b->length;
+ }
+ leave_yellow(beam, &bl);
+ }
+ return l;
+}
+
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam)
+{
+ apr_bucket *b;
+ apr_off_t l = 0;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ for (b = H2_BLIST_FIRST(&beam->red);
+ b != H2_BLIST_SENTINEL(&beam->red);
+ b = APR_BUCKET_NEXT(b)) {
+ if (APR_BUCKET_IS_FILE(b)) {
+ /* do not count */
+ }
+ else {
+ /* should all have determinate length */
+ l += b->length;
+ }
+ }
+ leave_yellow(beam, &bl);
+ }
+ return l;
+}
+
+int h2_beam_empty(h2_bucket_beam *beam)
+{
+ int empty = 1;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ empty = (H2_BLIST_EMPTY(&beam->red)
+ && (!beam->green || APR_BRIGADE_EMPTY(beam->green)));
+ leave_yellow(beam, &bl);
+ }
+ return empty;
+}
+
+int h2_beam_closed(h2_bucket_beam *beam)
+{
+ return beam->closed;
+}
+
+int h2_beam_was_received(h2_bucket_beam *beam)
+{
+    int happened = 0;
+    h2_beam_lock bl;
+
+    if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+        happened = (beam->received_bytes > 0);
+        leave_yellow(beam, &bl);
+    }
+    return happened;
+}
+
+apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam)
+{
+ apr_size_t n = 0;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ n = beam->files_beamed;
+ leave_yellow(beam, &bl);
+ }
+ return n;
+}
+
diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h
new file mode 100644
index 00000000..5c5d65de
--- /dev/null
+++ b/modules/http2/h2_bucket_beam.h
@@ -0,0 +1,363 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_bucket_beam_h
+#define h2_bucket_beam_h
+
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+
+/*******************************************************************************
+ * apr_bucket list without bells and whistles
+ ******************************************************************************/
+
+/**
+ * h2_blist can hold a list of buckets just like apr_bucket_brigade, but
+ * does not do any allocations or provide related features.
+ */
+typedef struct {
+ APR_RING_HEAD(h2_bucket_list, apr_bucket) list;
+} h2_blist;
+
+#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link);
+#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link)
+#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link)
+#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list)
+#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list)
+#define H2_BLIST_INSERT_HEAD(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_INSERT_TAIL(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_CONCAT(a, b) do { \
+ APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \
+ } while (0)
+#define H2_BLIST_PREPEND(a, b) do { \
+ APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \
+ } while (0)
+
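+/* Illustrative usage sketch (not part of this change), mirroring the list
+ * handling patterns in h2_bucket_beam.c; 'list', 'total' and 'some_bucket'
+ * are hypothetical names:
+ *
+ *   h2_blist list;
+ *   apr_bucket *b;
+ *   apr_off_t total = 0;
+ *
+ *   H2_BLIST_INIT(&list);
+ *   H2_BLIST_INSERT_TAIL(&list, some_bucket);
+ *   for (b = H2_BLIST_FIRST(&list);
+ *        b != H2_BLIST_SENTINEL(&list);
+ *        b = APR_BUCKET_NEXT(b)) {
+ *       total += b->length;
+ *   }
+ *   while (!H2_BLIST_EMPTY(&list)) {
+ *       apr_bucket_delete(H2_BLIST_FIRST(&list));
+ *   }
+ */
+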
+/**
+ * Print the buckets in the list into the buffer (type and lengths).
+ * @param buffer the buffer to print into
+ * @param bmax max number of characters to place in buffer, incl. trailing 0
+ * @param tag tag string for this bucket list
+ * @param sep separator to use
+ * @param bl the bucket list to print
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ h2_blist *bl);
+
+/*******************************************************************************
+ * h2_bucket_beam
+ ******************************************************************************/
+
+/**
+ * A h2_bucket_beam solves the task of transferring buckets, esp. their data,
+ * across threads with zero buffer copies.
+ *
+ * When a thread, let's call it the red thread, wants to send buckets to
+ * another, the green thread, it creates a h2_bucket_beam and adds buckets
+ * via h2_beam_send(). It gives the beam to the green thread, which then
+ * can receive buckets into its own brigade via h2_beam_receive().
+ *
+ * Sending and receiving can happen concurrently, if a thread mutex is set
+ * for the beam, see h2_beam_mutex_set.
+ *
+ * The beam can limit the amount of data it accepts via the buffer_size. This
+ * can also be adjusted during its lifetime. When the beam gets not only a
+ * mutex but also a condition variable (via h2_beam_mutex_set()), sends and
+ * receives can be done blocking. A timeout can be set for such blocking calls.
+ *
+ * Care needs to be taken when terminating the beam. The beam registers a
+ * cleanup with the pool it was created with and will clean up after itself.
+ * However, if received buckets still exist at that point, already freed
+ * memory might be accessed. The beam does an AP_DEBUG_ASSERT on this condition.
+ *
+ * The proper way of shutting down a beam is to first make sure there are no
+ * more green buckets out there, then clean up the beam to purge any still
+ * existing red buckets and then, possibly, destroy the beam itself (or the
+ * pool it was created with).
+ *
+ * The following restrictions apply to bucket transport:
+ * - only EOS and FLUSH meta buckets are copied through. All other meta buckets
+ *   are kept in the beam's hold.
+ * - all kinds of data buckets are transported:
+ * - transient buckets are converted to heap ones on send
+ * - heap and pool buckets require no extra handling
+ * - buckets with indeterminate length are read on send
+ * - file buckets will transfer the file itself into a new bucket, if allowed
+ * - all other buckets are read on send to make sure data is present
+ *
+ * This ensures that when the red thread sends its red buckets, the data
+ * is made accessible while still on the red side. The red bucket then enters
+ * the beam's hold storage.
+ * When the green thread calls receive, red buckets in the hold are wrapped
+ * into special beam buckets. Beam buckets on read present the data directly
+ * from the internal red one, but otherwise live on the green side. When a
+ * beam bucket gets destroyed, it notifies its beam that the corresponding
+ * red bucket from the hold may be destroyed.
+ * Since the destruction of green buckets happens in the green thread, any
+ * corresponding red bucket can not immediately be destroyed, as that would
+ * result in race conditions.
+ * Instead, the beam transfers such red buckets from the hold to the purge
+ * storage. Next time there is a call from the red side, the buckets in
+ * purge will be deleted.
+ *
+ * There are callbacks that can be registered with a beam:
+ * - a "consumed" callback that gets called on the red side with the
+ * amount of data that has been received by the green side. The amount
+ * is a delta from the last callback invocation. The red side can trigger
+ * these callbacks by calling h2_beam_send() with a NULL brigade.
+ * - a "can_beam_file" callback that can prohibit the transfer of file handles
+ *   through the beam. This will cause file buckets to be read on send and
+ *   their data will then be transported just as a heap bucket's data would be.
+ * When no callback is registered, no restrictions apply and all files are
+ * passed through.
+ * File handles transferred to the green side will stay there until the
+ * receiving brigade's pool is destroyed/cleared. If the pool lives very
+ * long or if many different files are beamed, the process might run out
+ * of available file handles.
+ *
+ * The name "beam" of course is inspired by good old transporter
+ * technology where humans are kept inside the transporter's memory
+ * buffers until the transmission is complete. Star gates use a similar trick.
+ */
+
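+/* Illustrative usage sketch (not part of this change); 'red_pool', 'red_bb',
+ * 'green_bb', 'my_enter', 'my_cond' and 'my_ctx' are hypothetical, error
+ * handling is omitted, and a sketch of the enter callback follows after the
+ * h2_beam_mutex_enter typedef below:
+ *
+ *   h2_bucket_beam *beam;
+ *
+ *   h2_beam_create(&beam, red_pool, 1, "example", 64 * 1024);
+ *   h2_beam_mutex_set(beam, my_enter, my_cond, my_ctx);
+ *
+ *   (red thread)    h2_beam_send(beam, red_bb, APR_BLOCK_READ);
+ *                   h2_beam_close(beam);
+ *   (green thread)  h2_beam_receive(beam, green_bb, APR_BLOCK_READ, 0);
+ *   (red thread)    h2_beam_shutdown(beam, APR_BLOCK_READ, 1);
+ *                   h2_beam_destroy(beam);
+ */
+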
+typedef void h2_beam_mutex_leave(void *ctx, struct apr_thread_mutex_t *lock);
+
+typedef struct {
+ apr_thread_mutex_t *mutex;
+ h2_beam_mutex_leave *leave;
+ void *leave_ctx;
+} h2_beam_lock;
+
+typedef struct h2_bucket_beam h2_bucket_beam;
+
+typedef apr_status_t h2_beam_mutex_enter(void *ctx, h2_beam_lock *pbl);
+
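+/* Minimal sketch of an enter callback (illustrative, not part of this change):
+ * it acquires a lock and fills the h2_beam_lock so the beam can later release
+ * it via the recorded leave function; 'my_ctx_t', 'my_leave' and 'my_enter'
+ * are hypothetical:
+ *
+ *   static void my_leave(void *ctx, apr_thread_mutex_t *lock) {
+ *       apr_thread_mutex_unlock(lock);
+ *   }
+ *
+ *   static apr_status_t my_enter(void *ctx, h2_beam_lock *pbl) {
+ *       my_ctx_t *my = ctx;
+ *       apr_status_t rv = apr_thread_mutex_lock(my->mutex);
+ *       if (rv == APR_SUCCESS) {
+ *           pbl->mutex     = my->mutex;
+ *           pbl->leave     = my_leave;
+ *           pbl->leave_ctx = ctx;
+ *       }
+ *       return rv;
+ *   }
+ */
+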
+typedef void h2_beam_io_callback(void *ctx, h2_bucket_beam *beam,
+ apr_off_t bytes);
+
+typedef struct h2_beam_proxy h2_beam_proxy;
+typedef struct {
+ APR_RING_HEAD(h2_beam_proxy_list, h2_beam_proxy) list;
+} h2_bproxy_list;
+
+typedef int h2_beam_can_beam_callback(void *ctx, h2_bucket_beam *beam,
+ apr_file_t *file);
+
+struct h2_bucket_beam {
+ int id;
+ const char *tag;
+ h2_blist red;
+ h2_blist hold;
+ h2_blist purge;
+ apr_bucket_brigade *green;
+ h2_bproxy_list proxies;
+ apr_pool_t *red_pool;
+
+ apr_size_t max_buf_size;
+ apr_interval_time_t timeout;
+
+    apr_off_t sent_bytes;     /* amount of bytes sent */
+ apr_off_t received_bytes; /* amount of bytes received */
+
+ apr_size_t buckets_sent; /* # of beam buckets sent */
+ apr_size_t files_beamed; /* how many file handles have been set aside */
+ apr_file_t *last_beamed; /* last file beamed */
+
+ unsigned int aborted : 1;
+ unsigned int closed : 1;
+ unsigned int close_sent : 1;
+
+ void *m_ctx;
+ h2_beam_mutex_enter *m_enter;
+ struct apr_thread_cond_t *m_cond;
+
+ apr_off_t reported_consumed_bytes; /* amount of bytes reported as consumed */
+ h2_beam_io_callback *consumed_fn;
+ void *consumed_ctx;
+ apr_off_t reported_produced_bytes; /* amount of bytes reported as produced */
+ h2_beam_io_callback *produced_fn;
+ void *produced_ctx;
+ h2_beam_can_beam_callback *can_beam_fn;
+ void *can_beam_ctx;
+};
+
+/**
+ * Creates a new bucket beam for transfer of buckets across threads.
+ *
+ * The pool the beam is created with will be protected by the given
+ * mutex and will be used in multiple threads. It needs a pool allocator
+ * that is only used inside that same mutex.
+ *
+ * @param pbeam will hold the created beam on return
+ * @param red_pool pool usable on red side, beam lifeline
+ * @param buffer_size maximum memory footprint of buckets buffered in beam, or
+ * 0 for no limitation
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam,
+ apr_pool_t *red_pool,
+ int id, const char *tag,
+ apr_size_t buffer_size);
+
+/**
+ * Destroys the beam immediately without cleanup.
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam);
+
+/**
+ * Send buckets from the given brigade through the beam. Will hold buckets
+ * internally as long as they have not been processed by the receiving side.
+ * All accepted buckets are removed from the given brigade. Will return with
+ * APR_EAGAIN on non-blocking sends when not all buckets could be accepted.
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_send(h2_bucket_beam *beam,
+ apr_bucket_brigade *red_buckets,
+ apr_read_type_e block);
+
+/**
+ * Receive buckets from the beam into the given brigade. Will return APR_EOF
+ * when reading past an EOS bucket. Reads can be blocking until data is
+ * available or the beam has been closed. Non-blocking calls return APR_EAGAIN
+ * if no data is available.
+ *
+ * Call from the green side only.
+ */
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+ apr_bucket_brigade *green_buckets,
+ apr_read_type_e block,
+ apr_off_t readbytes);
+
+/**
+ * Determine if beam is closed. May still contain buffered data.
+ *
+ * Call from red or green side.
+ */
+int h2_beam_closed(h2_bucket_beam *beam);
+
+/**
+ * Determine if beam is empty.
+ *
+ * Call from red or green side.
+ */
+int h2_beam_empty(h2_bucket_beam *beam);
+
+/**
+ * Abort the beam. Will cleanup any buffered buckets and answer all send
+ * and receives with APR_ECONNABORTED.
+ *
+ * Call from the red side only.
+ */
+void h2_beam_abort(h2_bucket_beam *beam);
+
+/**
+ * Close the beam. Sending an EOS bucket serves the same purpose.
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_close(h2_bucket_beam *beam);
+
+/**
+ * Return APR_SUCCESS when all buckets in transit have been handled.
+ * When called with APR_BLOCK_READ and a mutex set, will wait until the green
+ * side has consumed all data. Otherwise APR_EAGAIN is returned.
+ * With clear_buffers set, any queued data is discarded.
+ * If a timeout is set on the beam, waiting might also time out and
+ * return APR_TIMEUP.
+ *
+ * Call from the red side only.
+ */
+apr_status_t h2_beam_shutdown(h2_bucket_beam *beam, apr_read_type_e block,
+ int clear_buffers);
+
+void h2_beam_mutex_set(h2_bucket_beam *beam,
+ h2_beam_mutex_enter m_enter,
+ struct apr_thread_cond_t *cond,
+ void *m_ctx);
+
+/**
+ * Set/get the timeout for blocking read/write operations. Only works
+ * if a mutex has been set for the beam.
+ */
+void h2_beam_timeout_set(h2_bucket_beam *beam,
+ apr_interval_time_t timeout);
+apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam);
+
+/**
+ * Set/get the maximum buffer size for beam data (memory footprint).
+ */
+void h2_beam_buffer_size_set(h2_bucket_beam *beam,
+ apr_size_t buffer_size);
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam);
+
+/**
+ * Register a callback to be invoked on the red side with the
+ * amount of bytes that have been consumed by the green side, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param cb the callback or NULL
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the red side, callbacks invoked on red side.
+ */
+void h2_beam_on_consumed(h2_bucket_beam *beam,
+ h2_beam_io_callback *cb, void *ctx);
+
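+/* Illustrative sketch (not part of this change): registering a consumed
+ * callback and triggering pending reports from the red side; 'on_consumed'
+ * and 'my_ctx' are hypothetical:
+ *
+ *   static void on_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+ *   {
+ *       (void)ctx; (void)beam;
+ *       // 'bytes' is the delta consumed by the green side since the last call
+ *   }
+ *
+ *   h2_beam_on_consumed(beam, on_consumed, my_ctx);
+ *   h2_beam_send(beam, NULL, APR_NONBLOCK_READ);
+ */
+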
+/**
+ * Register a callback to be invoked on the red side with the
+ * amount of bytes that have been produced by the red side, since the
+ * last callback invocation or reset.
+ * @param beam the beam to set the callback on
+ * @param cb the callback or NULL
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the red side, callbacks invoked on red side.
+ */
+void h2_beam_on_produced(h2_bucket_beam *beam,
+ h2_beam_io_callback *cb, void *ctx);
+
+void h2_beam_on_file_beam(h2_bucket_beam *beam,
+ h2_beam_can_beam_callback *cb, void *ctx);
+
+/**
+ * Get the amount of bytes currently buffered in the beam (unread).
+ */
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam);
+
+/**
+ * Get the memory used by the buffered buckets, approximately.
+ */
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam);
+
+/**
+ * Return != 0 iff (some) data from the beam has been received.
+ */
+int h2_beam_was_received(h2_bucket_beam *beam);
+
+apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam);
+
+#endif /* h2_bucket_beam_h */
diff --git a/modules/http2/h2_bucket_eoc.c b/modules/http2/h2_bucket_eoc.c
new file mode 100644
index 00000000..33144ef5
--- /dev/null
+++ b/modules/http2/h2_bucket_eoc.c
@@ -0,0 +1,110 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_mplx.h"
+#include "h2_session.h"
+#include "h2_bucket_eoc.h"
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_session *session;
+} h2_bucket_eoc;
+
+static apr_status_t bucket_cleanup(void *data)
+{
+ h2_session **psession = data;
+
+ if (*psession) {
+ /*
+ * If bucket_destroy is called after us, this prevents
+ * bucket_destroy from trying to destroy the pool again.
+ */
+ *psession = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+apr_bucket * h2_bucket_eoc_make(apr_bucket *b, h2_session *session)
+{
+ h2_bucket_eoc *h;
+
+ h = apr_bucket_alloc(sizeof(*h), b->list);
+ h->session = session;
+
+ b = apr_bucket_shared_make(b, h, 0, 0);
+ b->type = &h2_bucket_type_eoc;
+
+ return b;
+}
+
+apr_bucket * h2_bucket_eoc_create(apr_bucket_alloc_t *list, h2_session *session)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_eoc_make(b, session);
+ if (session) {
+ h2_bucket_eoc *h = b->data;
+ apr_pool_pre_cleanup_register(session->pool, &h->session, bucket_cleanup);
+ }
+ return b;
+}
+
+static void bucket_destroy(void *data)
+{
+ h2_bucket_eoc *h = data;
+
+ if (apr_bucket_shared_destroy(h)) {
+ h2_session *session = h->session;
+ apr_bucket_free(h);
+ if (session) {
+ h2_session_eoc_callback(session);
+ /* all is gone now */
+ }
+ }
+}
+
+const apr_bucket_type_t h2_bucket_type_eoc = {
+ "H2EOC", 5, APR_BUCKET_METADATA,
+ bucket_destroy,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
diff --git a/modules/http2/h2_bucket_eoc.h b/modules/http2/h2_bucket_eoc.h
new file mode 100644
index 00000000..2d466919
--- /dev/null
+++ b/modules/http2/h2_bucket_eoc.h
@@ -0,0 +1,32 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_http2_h2_bucket_eoc_h
+#define mod_http2_h2_bucket_eoc_h
+
+struct h2_session;
+
+/** End Of HTTP/2 SESSION (H2EOC) bucket */
+extern const apr_bucket_type_t h2_bucket_type_eoc;
+
+#define H2_BUCKET_IS_H2EOC(e) (e->type == &h2_bucket_type_eoc)
+
+apr_bucket * h2_bucket_eoc_make(apr_bucket *b,
+ struct h2_session *session);
+
+apr_bucket * h2_bucket_eoc_create(apr_bucket_alloc_t *list,
+ struct h2_session *session);
+
+#endif /* mod_http2_h2_bucket_eoc_h */
diff --git a/modules/http2/h2_bucket_eos.c b/modules/http2/h2_bucket_eos.c
new file mode 100644
index 00000000..28c34fdc
--- /dev/null
+++ b/modules/http2/h2_bucket_eos.c
@@ -0,0 +1,111 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_mplx.h"
+#include "h2_stream.h"
+#include "h2_bucket_eos.h"
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_stream *stream;
+} h2_bucket_eos;
+
+static apr_status_t bucket_cleanup(void *data)
+{
+ h2_stream **pstream = data;
+
+ if (*pstream) {
+ /* If bucket_destroy is called after us, this prevents
+ * bucket_destroy from trying to destroy the stream again. */
+ *pstream = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+apr_bucket *h2_bucket_eos_make(apr_bucket *b, h2_stream *stream)
+{
+ h2_bucket_eos *h;
+
+ h = apr_bucket_alloc(sizeof(*h), b->list);
+ h->stream = stream;
+
+ b = apr_bucket_shared_make(b, h, 0, 0);
+ b->type = &h2_bucket_type_eos;
+
+ return b;
+}
+
+apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list,
+ h2_stream *stream)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_eos_make(b, stream);
+ if (stream) {
+ h2_bucket_eos *h = b->data;
+ apr_pool_pre_cleanup_register(stream->pool, &h->stream, bucket_cleanup);
+ }
+ return b;
+}
+
+static void bucket_destroy(void *data)
+{
+ h2_bucket_eos *h = data;
+
+ if (apr_bucket_shared_destroy(h)) {
+ h2_stream *stream = h->stream;
+ if (stream && stream->pool) {
+ apr_pool_cleanup_kill(stream->pool, &h->stream, bucket_cleanup);
+ }
+ apr_bucket_free(h);
+ if (stream) {
+ h2_stream_eos_destroy(stream);
+ }
+ }
+}
+
+const apr_bucket_type_t h2_bucket_type_eos = {
+ "H2EOS", 5, APR_BUCKET_METADATA,
+ bucket_destroy,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
diff --git a/modules/http2/h2_bucket_eos.h b/modules/http2/h2_bucket_eos.h
new file mode 100644
index 00000000..27b501da
--- /dev/null
+++ b/modules/http2/h2_bucket_eos.h
@@ -0,0 +1,31 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_http2_h2_bucket_stream_eos_h
+#define mod_http2_h2_bucket_stream_eos_h
+
+struct h2_stream;
+
+/** End Of HTTP/2 STREAM (H2EOS) bucket */
+extern const apr_bucket_type_t h2_bucket_type_eos;
+
+#define H2_BUCKET_IS_H2EOS(e) (e->type == &h2_bucket_type_eos)
+
+apr_bucket *h2_bucket_eos_make(apr_bucket *b, struct h2_stream *stream);
+
+apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list,
+ struct h2_stream *stream);
+
+#endif /* mod_http2_h2_bucket_stream_eos_h */
diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c
new file mode 100644
index 00000000..0c1e6c46
--- /dev/null
+++ b/modules/http2/h2_config.c
@@ -0,0 +1,568 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_hash.h>
+#include <apr_lib.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_vhost.h>
+
+#include <ap_mpm.h>
+
+#include <apr_strings.h>
+
+#include "h2.h"
+#include "h2_alt_svc.h"
+#include "h2_ctx.h"
+#include "h2_conn.h"
+#include "h2_config.h"
+#include "h2_h2.h"
+#include "h2_private.h"
+
+#define DEF_VAL (-1)
+
+#define H2_CONFIG_GET(a, b, n) \
+ (((a)->n == DEF_VAL)? (b) : (a))->n
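+
+/* A member set to DEF_VAL means "not configured": H2_CONFIG_GET(add, base, n)
+ * yields add->n when it was set and base->n otherwise. Values left at DEF_VAL
+ * through all merges fall back to the defaults in defconf below via
+ * h2_config_geti()/h2_config_geti64(). */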
+
+static h2_config defconf = {
+ "default",
+ 100, /* max_streams */
+ H2_INITIAL_WINDOW_SIZE, /* window_size */
+ -1, /* min workers */
+ -1, /* max workers */
+ 10 * 60, /* max workers idle secs */
+ 64 * 1024, /* stream max mem size */
+ NULL, /* no alt-svcs */
+ -1, /* alt-svc max age */
+ 0, /* serialize headers */
+ -1, /* h2 direct mode */
+ -1, /* # session extra files */
+ 1, /* modern TLS only */
+ -1, /* HTTP/1 Upgrade support */
+ 1024*1024, /* TLS warmup size */
+ 1, /* TLS cooldown secs */
+ 1, /* HTTP/2 server push enabled */
+ NULL, /* map of content-type to priorities */
+ 256, /* push diary size */
+
+};
+
+void h2_config_init(apr_pool_t *pool)
+{
+ (void)pool;
+}
+
+static void *h2_config_create(apr_pool_t *pool,
+ const char *prefix, const char *x)
+{
+ h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
+
+ const char *s = x? x : "unknown";
+ char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL);
+
+ conf->name = name;
+ conf->h2_max_streams = DEF_VAL;
+ conf->h2_window_size = DEF_VAL;
+ conf->min_workers = DEF_VAL;
+ conf->max_workers = DEF_VAL;
+ conf->max_worker_idle_secs = DEF_VAL;
+ conf->stream_max_mem_size = DEF_VAL;
+ conf->alt_svc_max_age = DEF_VAL;
+ conf->serialize_headers = DEF_VAL;
+ conf->h2_direct = DEF_VAL;
+ conf->session_extra_files = DEF_VAL;
+ conf->modern_tls_only = DEF_VAL;
+ conf->h2_upgrade = DEF_VAL;
+ conf->tls_warmup_size = DEF_VAL;
+ conf->tls_cooldown_secs = DEF_VAL;
+ conf->h2_push = DEF_VAL;
+ conf->priorities = NULL;
+ conf->push_diary_size = DEF_VAL;
+
+ return conf;
+}
+
+void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
+{
+ return h2_config_create(pool, "srv", s->defn_name);
+}
+
+void *h2_config_create_dir(apr_pool_t *pool, char *x)
+{
+ return h2_config_create(pool, "dir", x);
+}
+
+void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
+{
+ h2_config *base = (h2_config *)basev;
+ h2_config *add = (h2_config *)addv;
+ h2_config *n = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
+
+ char *name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
+ n->name = name;
+
+ n->h2_max_streams = H2_CONFIG_GET(add, base, h2_max_streams);
+ n->h2_window_size = H2_CONFIG_GET(add, base, h2_window_size);
+ n->min_workers = H2_CONFIG_GET(add, base, min_workers);
+ n->max_workers = H2_CONFIG_GET(add, base, max_workers);
+ n->max_worker_idle_secs = H2_CONFIG_GET(add, base, max_worker_idle_secs);
+ n->stream_max_mem_size = H2_CONFIG_GET(add, base, stream_max_mem_size);
+ n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs;
+ n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age);
+ n->serialize_headers = H2_CONFIG_GET(add, base, serialize_headers);
+ n->h2_direct = H2_CONFIG_GET(add, base, h2_direct);
+ n->session_extra_files = H2_CONFIG_GET(add, base, session_extra_files);
+ n->modern_tls_only = H2_CONFIG_GET(add, base, modern_tls_only);
+ n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
+ n->tls_warmup_size = H2_CONFIG_GET(add, base, tls_warmup_size);
+ n->tls_cooldown_secs = H2_CONFIG_GET(add, base, tls_cooldown_secs);
+ n->h2_push = H2_CONFIG_GET(add, base, h2_push);
+ if (add->priorities && base->priorities) {
+ n->priorities = apr_hash_overlay(pool, add->priorities, base->priorities);
+ }
+ else {
+ n->priorities = add->priorities? add->priorities : base->priorities;
+ }
+ n->push_diary_size = H2_CONFIG_GET(add, base, push_diary_size);
+
+ return n;
+}
+
+int h2_config_geti(const h2_config *conf, h2_config_var_t var)
+{
+ return (int)h2_config_geti64(conf, var);
+}
+
+apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
+{
+ switch(var) {
+ case H2_CONF_MAX_STREAMS:
+ return H2_CONFIG_GET(conf, &defconf, h2_max_streams);
+ case H2_CONF_WIN_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, h2_window_size);
+ case H2_CONF_MIN_WORKERS:
+ return H2_CONFIG_GET(conf, &defconf, min_workers);
+ case H2_CONF_MAX_WORKERS:
+ return H2_CONFIG_GET(conf, &defconf, max_workers);
+ case H2_CONF_MAX_WORKER_IDLE_SECS:
+ return H2_CONFIG_GET(conf, &defconf, max_worker_idle_secs);
+ case H2_CONF_STREAM_MAX_MEM:
+ return H2_CONFIG_GET(conf, &defconf, stream_max_mem_size);
+ case H2_CONF_ALT_SVC_MAX_AGE:
+ return H2_CONFIG_GET(conf, &defconf, alt_svc_max_age);
+ case H2_CONF_SER_HEADERS:
+ return H2_CONFIG_GET(conf, &defconf, serialize_headers);
+ case H2_CONF_MODERN_TLS_ONLY:
+ return H2_CONFIG_GET(conf, &defconf, modern_tls_only);
+ case H2_CONF_UPGRADE:
+ return H2_CONFIG_GET(conf, &defconf, h2_upgrade);
+ case H2_CONF_DIRECT:
+ return H2_CONFIG_GET(conf, &defconf, h2_direct);
+ case H2_CONF_SESSION_FILES:
+ return H2_CONFIG_GET(conf, &defconf, session_extra_files);
+ case H2_CONF_TLS_WARMUP_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, tls_warmup_size);
+ case H2_CONF_TLS_COOLDOWN_SECS:
+ return H2_CONFIG_GET(conf, &defconf, tls_cooldown_secs);
+ case H2_CONF_PUSH:
+ return H2_CONFIG_GET(conf, &defconf, h2_push);
+ case H2_CONF_PUSH_DIARY_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, push_diary_size);
+ default:
+ return DEF_VAL;
+ }
+}
+
+const h2_config *h2_config_sget(server_rec *s)
+{
+ h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config,
+ &http2_module);
+ AP_DEBUG_ASSERT(cfg);
+ return cfg;
+}
+
+const struct h2_priority *h2_config_get_priority(const h2_config *conf,
+ const char *content_type)
+{
+ if (content_type && conf->priorities) {
+ size_t len = strcspn(content_type, "; \t");
+ h2_priority *prio = apr_hash_get(conf->priorities, content_type, len);
+ return prio? prio : apr_hash_get(conf->priorities, "*", 1);
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_max_streams(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->h2_max_streams = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->h2_max_streams < 1) {
+ return "value must be > 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_window_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->h2_window_size = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->h2_window_size < 1024) {
+ return "value must be >= 1024";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_min_workers(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->min_workers = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->min_workers < 1) {
+ return "value must be > 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_max_workers(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->max_workers = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->max_workers < 1) {
+ return "value must be > 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->max_worker_idle_secs = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->max_worker_idle_secs < 1) {
+ return "value must be > 0";
+ }
+ return NULL;
+}
+
+static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+
+
+ cfg->stream_max_mem_size = (int)apr_atoi64(value);
+ (void)arg;
+ if (cfg->stream_max_mem_size < 1024) {
+ return "value must be >= 1024";
+ }
+ return NULL;
+}
+
+static const char *h2_add_alt_svc(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ if (value && strlen(value)) {
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool);
+ if (!as) {
+ return "unable to parse alt-svc specifier";
+ }
+ if (!cfg->alt_svcs) {
+ cfg->alt_svcs = apr_array_make(parms->pool, 5, sizeof(h2_alt_svc*));
+ }
+ APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
+ }
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->alt_svc_max_age = (int)apr_atoi64(value);
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_session_extra_files(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ apr_int64_t max = (int)apr_atoi64(value);
+ if (max < 0) {
+ return "value must be a non-negative number";
+ }
+ cfg->session_extra_files = (int)max;
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_serialize_headers(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->serialize_headers = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->serialize_headers = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_direct(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->h2_direct = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->h2_direct = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_push(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->h2_push = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->h2_push = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
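+/* Handler for the H2PushPriority directive. Illustrative configurations
+ * (not part of this change) that this parser accepts:
+ *
+ *   H2PushPriority *                      After       16
+ *   H2PushPriority text/css               Interleaved      (weight defaults to 256)
+ *   H2PushPriority image/jpeg             32               (dependency defaults to After)
+ *   H2PushPriority application/javascript Before           (Before takes no weight)
+ */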
+static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg,
+ const char *ctype, const char *sdependency,
+ const char *sweight)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(cmd->server);
+ const char *sdefweight = "16"; /* default AFTER weight */
+ h2_dependency dependency;
+ h2_priority *priority;
+ int weight;
+
+ if (!strlen(ctype)) {
+ return "1st argument must be a mime-type, like 'text/css' or '*'";
+ }
+
+ if (!sweight) {
+ /* 2 args only, but which one? */
+ if (apr_isdigit(sdependency[0])) {
+ sweight = sdependency;
+ sdependency = "AFTER"; /* default dependency */
+ }
+ }
+
+ if (!strcasecmp("AFTER", sdependency)) {
+ dependency = H2_DEPENDANT_AFTER;
+ }
+ else if (!strcasecmp("BEFORE", sdependency)) {
+ dependency = H2_DEPENDANT_BEFORE;
+ if (sweight) {
+ return "dependecy 'Before' does not allow a weight";
+ }
+ }
+ else if (!strcasecmp("INTERLEAVED", sdependency)) {
+ dependency = H2_DEPENDANT_INTERLEAVED;
+ sdefweight = "256"; /* default INTERLEAVED weight */
+ }
+ else {
+ return "dependency must be one of 'After', 'Before' or 'Interleaved'";
+ }
+
+ weight = (int)apr_atoi64(sweight? sweight : sdefweight);
+ if (weight < NGHTTP2_MIN_WEIGHT) {
+ return apr_psprintf(cmd->pool, "weight must be a number >= %d",
+ NGHTTP2_MIN_WEIGHT);
+ }
+
+ priority = apr_pcalloc(cmd->pool, sizeof(*priority));
+ priority->dependency = dependency;
+ priority->weight = weight;
+
+ if (!cfg->priorities) {
+ cfg->priorities = apr_hash_make(cmd->pool);
+ }
+ apr_hash_set(cfg->priorities, ctype, strlen(ctype), priority);
+ return NULL;
+}
+
+static const char *h2_conf_set_modern_tls_only(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->modern_tls_only = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->modern_tls_only = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_upgrade(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->h2_upgrade = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->h2_upgrade = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static const char *h2_conf_set_tls_warmup_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->tls_warmup_size = apr_atoi64(value);
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ cfg->tls_cooldown_secs = (int)apr_atoi64(value);
+ (void)arg;
+ return NULL;
+}
+
+static const char *h2_conf_set_push_diary_size(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ (void)arg;
+ cfg->push_diary_size = (int)apr_atoi64(value);
+ if (cfg->push_diary_size < 0) {
+ return "value must be >= 0";
+ }
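+    /* (n & (n-1)) clears the lowest set bit and is zero exactly when n is a power of two */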
+ if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) {
+ return "value must a power of 2";
+ }
+ if (cfg->push_diary_size > (1 << 15)) {
+ return "value must <= 65536";
+ }
+ return NULL;
+}
+
+#define AP_END_CMD AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL)
+
+const command_rec h2_cmds[] = {
+ AP_INIT_TAKE1("H2MaxSessionStreams", h2_conf_set_max_streams, NULL,
+ RSRC_CONF, "maximum number of open streams per session"),
+ AP_INIT_TAKE1("H2WindowSize", h2_conf_set_window_size, NULL,
+ RSRC_CONF, "window size on client DATA"),
+ AP_INIT_TAKE1("H2MinWorkers", h2_conf_set_min_workers, NULL,
+ RSRC_CONF, "minimum number of worker threads per child"),
+ AP_INIT_TAKE1("H2MaxWorkers", h2_conf_set_max_workers, NULL,
+ RSRC_CONF, "maximum number of worker threads per child"),
+ AP_INIT_TAKE1("H2MaxWorkerIdleSeconds", h2_conf_set_max_worker_idle_secs, NULL,
+ RSRC_CONF, "maximum number of idle seconds before a worker shuts down"),
+ AP_INIT_TAKE1("H2StreamMaxMemSize", h2_conf_set_stream_max_mem_size, NULL,
+ RSRC_CONF, "maximum number of bytes buffered in memory for a stream"),
+ AP_INIT_TAKE1("H2AltSvc", h2_add_alt_svc, NULL,
+ RSRC_CONF, "adds an Alt-Svc for this server"),
+ AP_INIT_TAKE1("H2AltSvcMaxAge", h2_conf_set_alt_svc_max_age, NULL,
+ RSRC_CONF, "set the maximum age (in seconds) that client can rely on alt-svc information"),
+ AP_INIT_TAKE1("H2SerializeHeaders", h2_conf_set_serialize_headers, NULL,
+ RSRC_CONF, "on to enable header serialization for compatibility"),
+ AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL,
+ RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"),
+ AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL,
+ RSRC_CONF, "on to allow HTTP/1 Upgrades to h2/h2c"),
+ AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL,
+ RSRC_CONF, "on to enable direct HTTP/2 mode"),
+ AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL,
+ RSRC_CONF, "number of extra file a session might keep open"),
+ AP_INIT_TAKE1("H2TLSWarmUpSize", h2_conf_set_tls_warmup_size, NULL,
+ RSRC_CONF, "number of bytes on TLS connection before doing max writes"),
+ AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL,
+ RSRC_CONF, "seconds of idle time on TLS before shrinking writes"),
+ AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL,
+ RSRC_CONF, "off to disable HTTP/2 server push"),
+ AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL,
+ RSRC_CONF, "define priority of PUSHed resources per content type"),
+ AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
+ RSRC_CONF, "size of push diary"),
+ AP_END_CMD
+};
+
+
+const h2_config *h2_config_rget(request_rec *r)
+{
+ h2_config *cfg = (h2_config *)ap_get_module_config(r->per_dir_config,
+ &http2_module);
+ return cfg? cfg : h2_config_sget(r->server);
+}
+
+const h2_config *h2_config_get(conn_rec *c)
+{
+ h2_ctx *ctx = h2_ctx_get(c, 0);
+
+ if (ctx) {
+ if (ctx->config) {
+ return ctx->config;
+ }
+ else if (ctx->server) {
+ ctx->config = h2_config_sget(ctx->server);
+ return ctx->config;
+ }
+ }
+
+ return h2_config_sget(c->base_server);
+}
diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h
new file mode 100644
index 00000000..92b222f9
--- /dev/null
+++ b/modules/http2/h2_config.h
@@ -0,0 +1,95 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_config_h__
+#define __mod_h2__h2_config_h__
+
+#undef PACKAGE_VERSION
+#undef PACKAGE_TARNAME
+#undef PACKAGE_STRING
+#undef PACKAGE_NAME
+#undef PACKAGE_BUGREPORT
+
+typedef enum {
+ H2_CONF_MAX_STREAMS,
+ H2_CONF_WIN_SIZE,
+ H2_CONF_MIN_WORKERS,
+ H2_CONF_MAX_WORKERS,
+ H2_CONF_MAX_WORKER_IDLE_SECS,
+ H2_CONF_STREAM_MAX_MEM,
+ H2_CONF_ALT_SVCS,
+ H2_CONF_ALT_SVC_MAX_AGE,
+ H2_CONF_SER_HEADERS,
+ H2_CONF_DIRECT,
+ H2_CONF_SESSION_FILES,
+ H2_CONF_MODERN_TLS_ONLY,
+ H2_CONF_UPGRADE,
+ H2_CONF_TLS_WARMUP_SIZE,
+ H2_CONF_TLS_COOLDOWN_SECS,
+ H2_CONF_PUSH,
+ H2_CONF_PUSH_DIARY_SIZE,
+} h2_config_var_t;
+
+struct apr_hash_t;
+struct h2_priority;
+
+/* Apache httpd module configuration for h2. */
+typedef struct h2_config {
+ const char *name;
+ int h2_max_streams; /* max concurrent # streams (http2) */
+ int h2_window_size; /* stream window size (http2) */
+ int min_workers; /* min # of worker threads/child */
+ int max_workers; /* max # of worker threads/child */
+ int max_worker_idle_secs; /* max # of idle seconds for worker */
+ int stream_max_mem_size; /* max # bytes held in memory/stream */
+ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
+ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
+ int serialize_headers; /* Use serialized HTTP/1.1 headers for
+ processing, better compatibility */
+ int h2_direct; /* if mod_h2 is active directly */
+ int session_extra_files; /* # of extra files a session may keep open */
+ int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
+ apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
+ int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
+ int h2_push; /* if HTTP/2 server push is enabled */
+ struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
+
+ int push_diary_size; /* # of entries in push diary */
+} h2_config;
+
+
+void *h2_config_create_dir(apr_pool_t *pool, char *x);
+void *h2_config_create_svr(apr_pool_t *pool, server_rec *s);
+void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv);
+
+apr_status_t h2_config_apply_header(const h2_config *config, request_rec *r);
+
+extern const command_rec h2_cmds[];
+
+const h2_config *h2_config_get(conn_rec *c);
+const h2_config *h2_config_sget(server_rec *s);
+const h2_config *h2_config_rget(request_rec *r);
+
+int h2_config_geti(const h2_config *conf, h2_config_var_t var);
+apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var);
+
+void h2_config_init(apr_pool_t *pool);
+
+const struct h2_priority *h2_config_get_priority(const h2_config *conf,
+ const char *content_type);
+
+#endif /* __mod_h2__h2_config_h__ */
+
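For illustration only (not part of the patch): a minimal sketch of how a hook might read effective per-connection settings through the accessors declared above, assuming the usual httpd headers and h2_config.h are included. The function name is hypothetical.

/* sketch: log two of the effective h2 limits for a connection */
static void example_log_h2_limits(conn_rec *c)
{
    const h2_config *conf = h2_config_get(c);
    int max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
    int push_on     = h2_config_geti(conf, H2_CONF_PUSH);

    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                  "h2 effective limits: max_streams=%d, push=%d",
                  max_streams, push_on);
}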
diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
new file mode 100644
index 00000000..4ddf1b70
--- /dev/null
+++ b/modules/http2/h2_conn.c
@@ -0,0 +1,332 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <ap_mpm.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_filter.h"
+#include "h2_mplx.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_h2.h"
+#include "h2_task.h"
+#include "h2_worker.h"
+#include "h2_workers.h"
+#include "h2_conn.h"
+#include "h2_version.h"
+
+static struct h2_workers *workers;
+
+static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN;
+static module *mpm_module;
+static int async_mpm;
+static apr_socket_t *dummy_socket;
+
+static void check_modules(int force)
+{
+ static int checked = 0;
+ int i;
+
+ if (force || !checked) {
+ for (i = 0; ap_loaded_modules[i]; ++i) {
+ module *m = ap_loaded_modules[i];
+
+ if (!strcmp("event.c", m->name)) {
+ mpm_type = H2_MPM_EVENT;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("motorz.c", m->name)) {
+ mpm_type = H2_MPM_MOTORZ;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("mpm_netware.c", m->name)) {
+ mpm_type = H2_MPM_NETWARE;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("prefork.c", m->name)) {
+ mpm_type = H2_MPM_PREFORK;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("simple_api.c", m->name)) {
+ mpm_type = H2_MPM_SIMPLE;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("mpm_winnt.c", m->name)) {
+ mpm_type = H2_MPM_WINNT;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("worker.c", m->name)) {
+ mpm_type = H2_MPM_WORKER;
+ mpm_module = m;
+ break;
+ }
+ }
+ checked = 1;
+ }
+}
+
+apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s)
+{
+ const h2_config *config = h2_config_sget(s);
+ apr_status_t status = APR_SUCCESS;
+ int minw, maxw, max_tx_handles, n;
+ int max_threads_per_child = 0;
+ int idle_secs = 0;
+
+ check_modules(1);
+
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads_per_child);
+
+ status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm);
+ if (status != APR_SUCCESS) {
+ /* some MPMs do not implement this */
+ async_mpm = 0;
+ status = APR_SUCCESS;
+ }
+
+ h2_config_init(pool);
+
+ minw = h2_config_geti(config, H2_CONF_MIN_WORKERS);
+ maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS);
+ if (minw <= 0) {
+ minw = max_threads_per_child;
+ }
+ if (maxw <= 0) {
+ maxw = minw;
+ }
+
+ /* How many file handles is it safe to use for transfers
+ * to the master connection to be streamed out?
+ * Is there a portable APR rlimit on NOFILES? We have not
+ * found one. And if there were, how many of those would we set aside?
+ * This all leads to a process-wide handle allocation strategy
+ * which ultimately would limit the number of accepted connections
+ * with the assumption of implicitly reserving n handles for every
+ * connection and requiring modules with excessive needs to allocate
+ * from a central pool.
+ */
+ n = h2_config_geti(config, H2_CONF_SESSION_FILES);
+ if (n < 0) {
+ max_tx_handles = maxw * 2;
+ }
+ else {
+ max_tx_handles = maxw * n;
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: min=%d max=%d, mthrpchild=%d, tx_files=%d",
+ minw, maxw, max_threads_per_child, max_tx_handles);
+ workers = h2_workers_create(s, pool, minw, maxw, max_tx_handles);
+
+ idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS);
+ h2_workers_set_max_idle_secs(workers, idle_secs);
+
+ ap_register_input_filter("H2_IN", h2_filter_core_input,
+ NULL, AP_FTYPE_CONNECTION);
+
+ status = h2_mplx_child_init(pool, s);
+
+ if (status == APR_SUCCESS) {
+ status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
+ APR_PROTO_TCP, pool);
+ }
+
+ return status;
+}
+
+h2_mpm_type_t h2_conn_mpm_type(void)
+{
+ check_modules(0);
+ return mpm_type;
+}
+
+static module *h2_conn_mpm_module(void)
+{
+ check_modules(0);
+ return mpm_module;
+}
+
+apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
+{
+ h2_session *session;
+
+ if (!workers) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911)
+ "workers not initialized");
+ return APR_EGENERAL;
+ }
+
+ if (r) {
+ session = h2_session_rcreate(r, ctx, workers);
+ }
+ else {
+ session = h2_session_create(c, ctx, workers);
+ }
+
+ h2_ctx_session_set(ctx, session);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
+{
+ apr_status_t status;
+ int mpm_state = 0;
+
+ do {
+ if (c->cs) {
+ c->cs->sense = CONN_SENSE_DEFAULT;
+ }
+ status = h2_session_process(h2_ctx_session_get(ctx), async_mpm);
+
+ if (APR_STATUS_IS_EOF(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03045)
+ "h2_session(%ld): process, closing conn", c->id);
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ c->keepalive = AP_CONN_KEEPALIVE;
+ }
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ break;
+ }
+ } while (!async_mpm
+ && c->keepalive == AP_CONN_KEEPALIVE
+ && mpm_state != AP_MPMQ_STOPPING);
+
+ return DONE;
+}
+
+apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c)
+{
+ apr_status_t status;
+
+ status = h2_session_pre_close(h2_ctx_session_get(ctx), async_mpm);
+ if (status == APR_SUCCESS) {
+ return DONE; /* This is the same, right? */
+ }
+ return status;
+}
+
+conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
+ apr_allocator_t *allocator)
+{
+ apr_pool_t *pool;
+ conn_rec *c;
+ void *cfg;
+
+ AP_DEBUG_ASSERT(master);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
+ "h2_conn(%ld): create slave", master->id);
+
+ /* We create a pool with its own allocator to be used for
+ * processing a request. This is the only way to have the processing
+ * independent of its parent pool in the sense that it can work in
+ * another thread.
+ */
+ if (!allocator) {
+ apr_allocator_create(&allocator);
+ }
+ apr_pool_create_ex(&pool, parent, NULL, allocator);
+ apr_pool_tag(pool, "h2_slave_conn");
+ apr_allocator_owner_set(allocator, pool);
+
+ c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
+ if (c == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
+ APLOGNO(02913) "h2_task: creating conn");
+ return NULL;
+ }
+
+ memcpy(c, master, sizeof(conn_rec));
+
+ /* Replace these */
+ c->master = master;
+ c->pool = pool;
+ c->conn_config = ap_create_conn_config(pool);
+ c->notes = apr_table_make(pool, 5);
+ c->input_filters = NULL;
+ c->output_filters = NULL;
+ c->bucket_alloc = apr_bucket_alloc_create(pool);
+ c->data_in_input_filters = 0;
+ c->data_in_output_filters = 0;
+ c->clogging_input_filters = 1;
+ c->log = NULL;
+ c->log_id = NULL;
+ /* Simulate that we have already had a request on this connection. */
+ c->keepalives = 1;
+ /* We cannot install the master connection socket on the slaves, as
+ * modules mess with timeouts/blocking of the socket, with
+ * unwanted side effects to the master connection processing.
+ * Fortunately, since we never use the slave socket, we can just install
+ * a single, process-wide dummy and everyone is happy.
+ */
+ ap_set_module_config(c->conn_config, &core_module, dummy_socket);
+ /* TODO: these should be unique to this thread */
+ c->sbh = master->sbh;
+ /* TODO: not all mpm modules have learned about slave connections yet.
+ * copy their config from master to slave.
+ */
+ if (h2_conn_mpm_module()) {
+ cfg = ap_get_module_config(master->conn_config, h2_conn_mpm_module());
+ ap_set_module_config(c->conn_config, h2_conn_mpm_module(), cfg);
+ }
+
+ return c;
+}
+
+void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator)
+{
+ apr_pool_t *parent;
+ apr_allocator_t *allocator = apr_pool_allocator_get(slave->pool);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave,
+ "h2_slave_conn(%ld): destroy (task=%s)", slave->id,
+ apr_table_get(slave->notes, H2_TASK_ID_NOTE));
+ /* Attach the allocator to the parent pool and return it for
+ * reuse; otherwise its owner remains the slave pool and it will
+ * get destroyed with it. */
+ parent = apr_pool_parent_get(slave->pool);
+ if (pallocator && parent) {
+ apr_allocator_owner_set(allocator, parent);
+ *pallocator = allocator;
+ }
+ apr_pool_destroy(slave->pool);
+}
+
+apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
+{
+ return ap_run_pre_connection(slave, csd);
+}
+
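For illustration only (not part of the patch): a rough sketch of the life cycle h2_slave_create()/h2_slave_destroy() above are designed for. The function name and the processing step are hypothetical; the allocator handed back by h2_slave_destroy() ends up owned by the parent pool and may be passed to the next h2_slave_create() call for reuse.

/* sketch: create a slave, run pre_connection, process, then recycle */
static void example_slave_cycle(conn_rec *master)
{
    apr_allocator_t *allocator = NULL;
    conn_rec *slave = h2_slave_create(master, master->pool, NULL);
    if (slave) {
        h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
        /* ... process one stream's request on the slave ... */
        h2_slave_destroy(slave, &allocator);
        /* allocator may now seed the next slave connection */
    }
}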
diff --git a/modules/http2/h2_conn.h b/modules/http2/h2_conn.h
new file mode 100644
index 00000000..e52fc8d6
--- /dev/null
+++ b/modules/http2/h2_conn.h
@@ -0,0 +1,76 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_conn__
+#define __mod_h2__h2_conn__
+
+struct h2_ctx;
+struct h2_task;
+
+/**
+ * Setup the connection and our context for HTTP/2 processing
+ *
+ * @param ctx the http2 context to setup
+ * @param c the connection HTTP/2 is starting on
+ * @param r the upgrade request that still awaits an answer, optional
+ */
+apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r);
+
+/**
+ * Run the HTTP/2 connection in synchronous fashion.
+ * Return when the HTTP/2 session is done
+ * and the connection will close or a fatal error occurred.
+ *
+ * @param ctx the http2 context to run
+ * @return APR_SUCCESS when session is done.
+ */
+apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c);
+
+/**
+ * The connection is about to close. If we have not sent a GOAWAY
+ * yet, this is the last chance.
+ */
+apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c);
+
+/* Initialize this child process for h2 connection work,
+ * to be called once during child init before multi processing
+ * starts.
+ */
+apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s);
+
+
+typedef enum {
+ H2_MPM_UNKNOWN,
+ H2_MPM_WORKER,
+ H2_MPM_EVENT,
+ H2_MPM_PREFORK,
+ H2_MPM_MOTORZ,
+ H2_MPM_SIMPLE,
+ H2_MPM_NETWARE,
+ H2_MPM_WINNT,
+} h2_mpm_type_t;
+
+/* Returns the type of MPM module detected */
+h2_mpm_type_t h2_conn_mpm_type(void);
+
+
+conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
+ apr_allocator_t *allocator);
+void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator);
+
+apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
+void h2_slave_run_connection(conn_rec *slave);
+
+#endif /* defined(__mod_h2__h2_conn__) */
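For illustration only (not part of the patch): a reduced sketch of how a process_connection hook could drive an HTTP/2 session with the API above, assuming h2_ctx.h is also included and protocol selection has already happened; real usage additionally handles the Upgrade request case.

/* sketch: set up and run an h2 session on a connection */
static int example_h2_process_conn(conn_rec *c)
{
    h2_ctx *ctx = h2_ctx_get(c, 1);
    if (h2_conn_setup(ctx, c, NULL) == APR_SUCCESS) {
        return h2_conn_run(ctx, c);   /* returns DONE once the session ends */
    }
    return DECLINED;
}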
diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c
new file mode 100644
index 00000000..df4aec14
--- /dev/null
+++ b/modules/http2/h2_conn_io.c
@@ -0,0 +1,422 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+#include <ap_mpm.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_request.h>
+
+#include "h2_private.h"
+#include "h2_bucket_eoc.h"
+#include "h2_bucket_eos.h"
+#include "h2_config.h"
+#include "h2_conn_io.h"
+#include "h2_h2.h"
+#include "h2_session.h"
+#include "h2_util.h"
+
+#define TLS_DATA_MAX (16*1024)
+
+/* Calculated like this: assuming MTU 1500 bytes
+ * 1500 - 40 (IP) - 20 (TCP) - 40 (TCP options)
+ * - TLS overhead (60-100)
+ * ~= 1300 bytes */
+#define WRITE_SIZE_INITIAL 1300
+/* Calculated like this: max TLS record size 16*1024
+ * - 40 (IP) - 20 (TCP) - 40 (TCP options)
+ * - TLS overhead (60-100)
+ * which seems to create fewer TCP packets overall
+ */
+#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100)
+
+
+static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
+ const char *tag, apr_bucket_brigade *bb)
+{
+ char buffer[16 * 1024];
+ const char *line = "(null)";
+ apr_size_t bmax = sizeof(buffer)/sizeof(buffer[0]);
+ int off = 0;
+ apr_bucket *b;
+
+ if (bb) {
+ memset(buffer, 0, bmax--);
+ for (b = APR_BRIGADE_FIRST(bb);
+ bmax && (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "eos ");
+ }
+ else if (APR_BUCKET_IS_FLUSH(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "flush ");
+ }
+ else if (AP_BUCKET_IS_EOR(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "eor ");
+ }
+ else if (H2_BUCKET_IS_H2EOC(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "h2eoc ");
+ }
+ else if (H2_BUCKET_IS_H2EOS(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "h2eos ");
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "meta(unknown) ");
+ }
+ }
+ else {
+ const char *btype = "data";
+ if (APR_BUCKET_IS_FILE(b)) {
+ btype = "file";
+ }
+ else if (APR_BUCKET_IS_PIPE(b)) {
+ btype = "pipe";
+ }
+ else if (APR_BUCKET_IS_SOCKET(b)) {
+ btype = "socket";
+ }
+ else if (APR_BUCKET_IS_HEAP(b)) {
+ btype = "heap";
+ }
+ else if (APR_BUCKET_IS_TRANSIENT(b)) {
+ btype = "transient";
+ }
+ else if (APR_BUCKET_IS_IMMORTAL(b)) {
+ btype = "immortal";
+ }
+#if APR_HAS_MMAP
+ else if (APR_BUCKET_IS_MMAP(b)) {
+ btype = "mmap";
+ }
+#endif
+ else if (APR_BUCKET_IS_POOL(b)) {
+ btype = "pool";
+ }
+
+ off += apr_snprintf(buffer+off, bmax-off, "%s[%ld] ",
+ btype,
+ (long)(b->length == ((apr_size_t)-1)?
+ -1 : b->length));
+ }
+ }
+ line = *buffer? buffer : "(empty)";
+ }
+ /* Intentional no APLOGNO */
+ ap_log_cerror(APLOG_MARK, level, 0, c, "bb_dump(%ld-%d)-%s: %s",
+ c->id, stream_id, tag, line);
+
+}
+
+apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
+ const h2_config *cfg)
+{
+ io->c = c;
+ io->output = apr_brigade_create(c->pool, c->bucket_alloc);
+ io->is_tls = h2_h2_is_tls(c);
+ io->buffer_output = io->is_tls;
+ io->pass_threshold = h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM) / 2;
+
+ if (io->is_tls) {
+ /* This is what we start with,
+ * see https://issues.apache.org/jira/browse/TS-2503
+ */
+ io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE);
+ io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS)
+ * APR_USEC_PER_SEC);
+ io->write_size = (io->cooldown_usecs > 0?
+ WRITE_SIZE_INITIAL : WRITE_SIZE_MAX);
+ }
+ else {
+ io->warmup_size = 0;
+ io->cooldown_usecs = 0;
+ io->write_size = 0;
+ }
+
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
+ "h2_conn_io(%ld): init, buffering=%d, warmup_size=%ld, "
+ "cd_secs=%f", io->c->id, io->buffer_output,
+ (long)io->warmup_size,
+ ((float)io->cooldown_usecs/APR_USEC_PER_SEC));
+ }
+
+ return APR_SUCCESS;
+}
+
+#define LOG_SCRATCH 0
+
+static void append_scratch(h2_conn_io *io)
+{
+ if (io->scratch && io->slen > 0) {
+ apr_bucket *b = apr_bucket_heap_create(io->scratch, io->slen,
+ apr_bucket_free,
+ io->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03386)
+ "h2_conn_io(%ld): append_scratch(%ld)",
+ io->c->id, (long)io->slen);
+#endif
+ io->scratch = NULL;
+ io->slen = io->ssize = 0;
+ }
+}
+
+static apr_size_t assure_scratch_space(h2_conn_io *io) {
+ apr_size_t remain = io->ssize - io->slen;
+ if (io->scratch && remain == 0) {
+ append_scratch(io);
+ }
+ if (!io->scratch) {
+ /* we control the size and it is larger than what buckets usually
+ * allocate. */
+ io->scratch = apr_bucket_alloc(io->write_size, io->c->bucket_alloc);
+ io->ssize = io->write_size;
+ io->slen = 0;
+ remain = io->ssize;
+ }
+ return remain;
+}
+
+static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b)
+{
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ if (!b->length) {
+ return APR_SUCCESS;
+ }
+
+ AP_DEBUG_ASSERT(b->length <= (io->ssize - io->slen));
+ if (APR_BUCKET_IS_FILE(b)) {
+ apr_bucket_file *f = (apr_bucket_file *)b->data;
+ apr_file_t *fd = f->fd;
+ apr_off_t offset = b->start;
+ apr_size_t len = b->length;
+
+ /* file buckets will either mmap (which we do not want) or
+ * read 8000 byte chunks and split themself. However, we do
+ * know *exactly* how many bytes we need where.
+ */
+ status = apr_file_seek(fd, APR_SET, &offset);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ status = apr_file_read(fd, io->scratch + io->slen, &len);
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, io->c, APLOGNO(03387)
+ "h2_conn_io(%ld): FILE_to_scratch(%ld)",
+ io->c->id, (long)len);
+#endif
+ if (status != APR_SUCCESS && status != APR_EOF) {
+ return status;
+ }
+ io->slen += len;
+ }
+ else {
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03388)
+ "h2_conn_io(%ld): read_to_scratch(%ld)",
+ io->c->id, (long)b->length);
+#endif
+ memcpy(io->scratch+io->slen, data, len);
+ io->slen += len;
+ }
+ }
+ return status;
+}
+
+static void check_write_size(h2_conn_io *io)
+{
+ if (io->write_size > WRITE_SIZE_INITIAL
+ && (io->cooldown_usecs > 0)
+ && (apr_time_now() - io->last_write) >= io->cooldown_usecs) {
+ /* long time not written, reset write size */
+ io->write_size = WRITE_SIZE_INITIAL;
+ io->bytes_written = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
+ "h2_conn_io(%ld): timeout write size reset to %ld",
+ (long)io->c->id, (long)io->write_size);
+ }
+ else if (io->write_size < WRITE_SIZE_MAX
+ && io->bytes_written >= io->warmup_size) {
+ /* connection is hot, use max size */
+ io->write_size = WRITE_SIZE_MAX;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
+ "h2_conn_io(%ld): threshold reached, write size now %ld",
+ (long)io->c->id, (long)io->write_size);
+ }
+}
+
+static apr_status_t pass_output(h2_conn_io *io, int flush, int eoc)
+{
+ conn_rec *c = io->c;
+ apr_bucket *b;
+ apr_off_t bblen;
+ apr_status_t status;
+
+ append_scratch(io);
+ if (flush) {
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+
+ if (APR_BRIGADE_EMPTY(io->output)) {
+ return APR_SUCCESS;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, "h2_conn_io: pass_output");
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL);
+ apr_brigade_length(io->output, 0, &bblen);
+
+ h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", io->output);
+ status = ap_pass_brigade(c->output_filters, io->output);
+
+ /* careful with access after this, as we might have flushed an EOC bucket
+ * that de-allocated us all. */
+ if (!eoc) {
+ apr_brigade_cleanup(io->output);
+ if (status == APR_SUCCESS) {
+ io->bytes_written += (apr_size_t)bblen;
+ io->last_write = apr_time_now();
+ }
+ }
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044)
+ "h2_conn_io(%ld): pass_out brigade %ld bytes",
+ c->id, (long)bblen);
+ }
+ return status;
+}
+
+apr_status_t h2_conn_io_flush(h2_conn_io *io)
+{
+ return pass_output(io, 1, 0);
+}
+
+apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, h2_session *session)
+{
+ apr_bucket *b = h2_bucket_eoc_create(io->c->bucket_alloc, session);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ return pass_output(io, 1, 1);
+}
+
+apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t remain;
+
+ if (io->buffer_output) {
+ while (length > 0) {
+ remain = assure_scratch_space(io);
+ if (remain >= length) {
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03389)
+ "h2_conn_io(%ld): write_to_scratch(%ld)",
+ io->c->id, (long)length);
+#endif
+ memcpy(io->scratch + io->slen, data, length);
+ io->slen += length;
+ length = 0;
+ }
+ else {
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03390)
+ "h2_conn_io(%ld): write_to_scratch(%ld)",
+ io->c->id, (long)remain);
+#endif
+ memcpy(io->scratch + io->slen, data, remain);
+ io->slen += remain;
+ data += remain;
+ length -= remain;
+ }
+ }
+ }
+ else {
+ status = apr_brigade_write(io->output, NULL, NULL, data, length);
+ }
+ return status;
+}
+
+apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ apr_status_t status = APR_SUCCESS;
+
+ check_write_size(io);
+ while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+ b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* need to finish any open scratch bucket, as meta data
+ * needs to be forwarded "in order". */
+ append_scratch(io);
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+ else if (io->buffer_output) {
+ apr_size_t remain = assure_scratch_space(io);
+ if (b->length > remain) {
+ apr_bucket_split(b, remain);
+ if (io->slen == 0) {
+ /* complete write_size bucket, append unchanged */
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+#if LOG_SCRATCH
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03391)
+ "h2_conn_io(%ld): pass bucket(%ld)",
+ io->c->id, (long)b->length);
+#endif
+ continue;
+ }
+ }
+ else {
+ /* bucket fits in remain, copy to scratch */
+ status = read_to_scratch(io, b);
+ apr_bucket_delete(b);
+ continue;
+ }
+ }
+ else {
+ /* no buffering: set aside transient buckets and forward everything */
+ if (APR_BUCKET_IS_TRANSIENT(b)) {
+ apr_bucket_setaside(b, io->c->pool);
+ }
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
+ }
+
+ if (status == APR_SUCCESS) {
+ if (!APR_BRIGADE_EMPTY(io->output)) {
+ apr_off_t len = h2_brigade_mem_size(io->output);
+ if (len >= io->pass_threshold) {
+ return pass_output(io, 0, 0);
+ }
+ }
+ }
+ return status;
+}
+
diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_conn_io.h
new file mode 100644
index 00000000..4ccf0070
--- /dev/null
+++ b/modules/http2/h2_conn_io.h
@@ -0,0 +1,76 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_conn_io__
+#define __mod_h2__h2_conn_io__
+
+struct h2_config;
+struct h2_session;
+
+/* h2_conn_io handles the buffered output of the HTTP/2 master connection.
+ * It keeps one output brigade plus a scratch buffer and passes data on
+ * through the installed connection filters, sizing writes to fit TLS
+ * records when the connection is encrypted.
+ */
+typedef struct {
+ conn_rec *c;
+ apr_bucket_brigade *output;
+
+ int is_tls;
+ apr_time_t cooldown_usecs;
+ apr_int64_t warmup_size;
+
+ apr_size_t write_size;
+ apr_time_t last_write;
+ apr_int64_t bytes_read;
+ apr_int64_t bytes_written;
+
+ int buffer_output;
+ apr_size_t pass_threshold;
+
+ char *scratch;
+ apr_size_t ssize;
+ apr_size_t slen;
+} h2_conn_io;
+
+apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
+ const struct h2_config *cfg);
+
+/**
+ * Append data to the buffered output.
+ * @param buf the data to append
+ * @param length the length of the data to append
+ */
+apr_status_t h2_conn_io_write(h2_conn_io *io,
+ const char *buf,
+ size_t length);
+
+apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb);
+
+/**
+ * Append an End-Of-Connection bucket to the output that, once destroyed,
+ * will tear down the complete http2 session.
+ */
+apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, struct h2_session *session);
+
+/**
+ * Pass any buffered data on to the connection output filters, appending
+ * a flush bucket to the output.
+ * @param io the connection io
+ */
+apr_status_t h2_conn_io_flush(h2_conn_io *io);
+
+#endif /* defined(__mod_h2__h2_conn_io__) */
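For illustration only (not part of the patch): the buffering pattern this API supports, reduced to a sketch with hypothetical names - append serialized frame bytes, then flush when the session has nothing further to send.

/* sketch: buffer one frame and force it onto the wire */
static apr_status_t example_send_frame(h2_conn_io *io,
                                       const char *frame, size_t len)
{
    apr_status_t rv = h2_conn_io_write(io, frame, len);
    if (rv == APR_SUCCESS) {
        rv = h2_conn_io_flush(io);   /* pass buffered data to the output filters */
    }
    return rv;
}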
diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c
new file mode 100644
index 00000000..4b596a3d
--- /dev/null
+++ b/modules/http2/h2_ctx.c
@@ -0,0 +1,120 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+
+#include "h2_private.h"
+#include "h2_session.h"
+#include "h2_task.h"
+#include "h2_ctx.h"
+
+static h2_ctx *h2_ctx_create(const conn_rec *c)
+{
+ h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
+ AP_DEBUG_ASSERT(ctx);
+ ap_set_module_config(c->conn_config, &http2_module, ctx);
+ h2_ctx_server_set(ctx, c->base_server);
+ return ctx;
+}
+
+void h2_ctx_clear(const conn_rec *c)
+{
+ AP_DEBUG_ASSERT(c);
+ ap_set_module_config(c->conn_config, &http2_module, NULL);
+}
+
+h2_ctx *h2_ctx_create_for(const conn_rec *c, h2_task *task)
+{
+ h2_ctx *ctx = h2_ctx_create(c);
+ if (ctx) {
+ ctx->task = task;
+ }
+ return ctx;
+}
+
+h2_ctx *h2_ctx_get(const conn_rec *c, int create)
+{
+ h2_ctx *ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module);
+ if (ctx == NULL && create) {
+ ctx = h2_ctx_create(c);
+ }
+ return ctx;
+}
+
+h2_ctx *h2_ctx_rget(const request_rec *r)
+{
+ return h2_ctx_get(r->connection, 0);
+}
+
+const char *h2_ctx_protocol_get(const conn_rec *c)
+{
+ h2_ctx *ctx;
+ if (c->master) {
+ c = c->master;
+ }
+ ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module);
+ return ctx? ctx->protocol : NULL;
+}
+
+h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto)
+{
+ ctx->protocol = proto;
+ return ctx;
+}
+
+h2_session *h2_ctx_session_get(h2_ctx *ctx)
+{
+ return ctx? ctx->session : NULL;
+}
+
+void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session)
+{
+ ctx->session = session;
+}
+
+server_rec *h2_ctx_server_get(h2_ctx *ctx)
+{
+ return ctx? ctx->server : NULL;
+}
+
+h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s)
+{
+ ctx->server = s;
+ return ctx;
+}
+
+int h2_ctx_is_task(h2_ctx *ctx)
+{
+ return ctx && ctx->task;
+}
+
+h2_task *h2_ctx_get_task(h2_ctx *ctx)
+{
+ return ctx? ctx->task : NULL;
+}
+
+h2_task *h2_ctx_cget_task(conn_rec *c)
+{
+ return h2_ctx_get_task(h2_ctx_get(c, 0));
+}
+
+h2_task *h2_ctx_rget_task(request_rec *r)
+{
+ return h2_ctx_get_task(h2_ctx_rget(r));
+}
diff --git a/modules/http2/h2_ctx.h b/modules/http2/h2_ctx.h
new file mode 100644
index 00000000..3b2c842c
--- /dev/null
+++ b/modules/http2/h2_ctx.h
@@ -0,0 +1,77 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_ctx__
+#define __mod_h2__h2_ctx__
+
+struct h2_session;
+struct h2_task;
+struct h2_config;
+
+/**
+ * The h2 module context associated with a connection.
+ *
+ * It keeps track of the different types of connections:
+ * - those from clients that use HTTP/2 protocol
+ * - those from clients that do not use HTTP/2
+ * - those we create ourselves to perform work on HTTP/2 streams
+ */
+typedef struct h2_ctx {
+ const char *protocol; /* the protocol negotiated */
+ struct h2_session *session; /* the session established */
+ struct h2_task *task; /* the h2_task executing or NULL */
+ const char *hostname; /* hostname negotiated via SNI, optional */
+ server_rec *server; /* httpd server config selected. */
+ const struct h2_config *config; /* effective config in this context */
+} h2_ctx;
+
+/**
+ * Get (or create) a h2 context record for this connection.
+ * @param c the connection to look at
+ * @param create != 0 iff missing context shall be created
+ * @return h2 context of this connection
+ */
+h2_ctx *h2_ctx_get(const conn_rec *c, int create);
+void h2_ctx_clear(const conn_rec *c);
+
+h2_ctx *h2_ctx_rget(const request_rec *r);
+h2_ctx *h2_ctx_create_for(const conn_rec *c, struct h2_task *task);
+
+
+/* Set the h2 protocol established on this connection context or
+ * NULL when other protocols are in place.
+ */
+h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto);
+
+/* Set the server_rec relevant for this context.
+ */
+h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s);
+server_rec *h2_ctx_server_get(h2_ctx *ctx);
+
+struct h2_session *h2_ctx_session_get(h2_ctx *ctx);
+void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session);
+
+/**
+ * Get the h2 protocol negotiated for this connection, or NULL.
+ */
+const char *h2_ctx_protocol_get(const conn_rec *c);
+
+int h2_ctx_is_task(h2_ctx *ctx);
+
+struct h2_task *h2_ctx_get_task(h2_ctx *ctx);
+struct h2_task *h2_ctx_cget_task(conn_rec *c);
+struct h2_task *h2_ctx_rget_task(request_rec *r);
+
+#endif /* defined(__mod_h2__h2_ctx__) */
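For illustration only (not part of the patch): a small sketch of how a request hook can tell requests on HTTP/2 slave connections apart from plain HTTP/1 requests via the context accessors above. The function name is hypothetical.

/* sketch: decline anything that is not driven by an h2_task */
static int example_h2_only_handler(request_rec *r)
{
    h2_ctx *ctx = h2_ctx_rget(r);
    if (!h2_ctx_is_task(ctx)) {
        return DECLINED;              /* not running on an h2 slave connection */
    }
    /* ... h2-specific handling ... */
    return OK;
}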
diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c
new file mode 100644
index 00000000..33189de0
--- /dev/null
+++ b/modules/http2/h2_filter.c
@@ -0,0 +1,290 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_conn_io.h"
+#include "h2_ctx.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_task.h"
+#include "h2_stream.h"
+#include "h2_request.h"
+#include "h2_response.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_version.h"
+
+#include "h2_filter.h"
+
+#define UNSET -1
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
+static apr_status_t consume_brigade(h2_filter_cin *cin,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t readlen = 0;
+
+ while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+
+ apr_bucket* bucket = APR_BRIGADE_FIRST(bb);
+ if (APR_BUCKET_IS_METADATA(bucket)) {
+ /* we do nothing regarding any meta here */
+ }
+ else {
+ const char *bucket_data = NULL;
+ apr_size_t bucket_length = 0;
+ status = apr_bucket_read(bucket, &bucket_data,
+ &bucket_length, block);
+
+ if (status == APR_SUCCESS && bucket_length > 0) {
+ apr_size_t consumed = 0;
+
+ status = cin->cb(cin->cb_ctx, bucket_data, bucket_length, &consumed);
+ if (status == APR_SUCCESS && bucket_length > consumed) {
+ /* We have data left in the bucket. Split it. */
+ status = apr_bucket_split(bucket, consumed);
+ }
+ readlen += consumed;
+ cin->start_read = apr_time_now();
+ }
+ }
+ apr_bucket_delete(bucket);
+ }
+
+ if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+h2_filter_cin *h2_filter_cin_create(apr_pool_t *p, h2_filter_cin_cb *cb, void *ctx)
+{
+ h2_filter_cin *cin;
+
+ cin = apr_pcalloc(p, sizeof(*cin));
+ cin->pool = p;
+ cin->cb = cb;
+ cin->cb_ctx = ctx;
+ cin->start_read = UNSET;
+ return cin;
+}
+
+void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout)
+{
+ cin->timeout = timeout;
+}
+
+apr_status_t h2_filter_core_input(ap_filter_t* f,
+ apr_bucket_brigade* brigade,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_filter_cin *cin = f->ctx;
+ apr_status_t status = APR_SUCCESS;
+ apr_interval_time_t saved_timeout = UNSET;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "core_input(%ld): read, %s, mode=%d, readbytes=%ld",
+ (long)f->c->id, (block == APR_BLOCK_READ)? "BLOCK_READ" : "NONBLOCK_READ",
+ mode, (long)readbytes);
+
+ if (mode == AP_MODE_INIT || mode == AP_MODE_SPECULATIVE) {
+ return ap_get_brigade(f->next, brigade, mode, block, readbytes);
+ }
+
+ if (mode != AP_MODE_READBYTES) {
+ return (block == APR_BLOCK_READ)? APR_SUCCESS : APR_EAGAIN;
+ }
+
+ if (!cin->bb) {
+ cin->bb = apr_brigade_create(cin->pool, f->c->bucket_alloc);
+ }
+
+ if (!cin->socket) {
+ cin->socket = ap_get_conn_socket(f->c);
+ }
+
+ cin->start_read = apr_time_now();
+ if (APR_BRIGADE_EMPTY(cin->bb)) {
+ /* We only do a blocking read when we have no streams to process. So,
+ * in httpd scoreboard lingo, we are in a KEEPALIVE connection state.
+ * When reading non-blocking, we do have streams to process and update
+ * child with NULL request. That way, any current request information
+ * in the scoreboard is preserved.
+ */
+ if (block == APR_BLOCK_READ) {
+ if (cin->timeout > 0) {
+ apr_socket_timeout_get(cin->socket, &saved_timeout);
+ apr_socket_timeout_set(cin->socket, cin->timeout);
+ }
+ }
+ status = ap_get_brigade(f->next, cin->bb, AP_MODE_READBYTES,
+ block, readbytes);
+ if (saved_timeout != UNSET) {
+ apr_socket_timeout_set(cin->socket, saved_timeout);
+ }
+ }
+
+ switch (status) {
+ case APR_SUCCESS:
+ status = consume_brigade(cin, cin->bb, block);
+ break;
+ case APR_EOF:
+ case APR_EAGAIN:
+ case APR_TIMEUP:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "core_input(%ld): read", (long)f->c->id);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046)
+ "h2_conn_io: error reading");
+ break;
+ }
+ return status;
+}
+
+/*******************************************************************************
+ * http2 connection status handler + stream out source
+ ******************************************************************************/
+
+static const char *H2_SOS_H2_STATUS = "http2-status";
+
+int h2_filter_h2_status_handler(request_rec *r)
+{
+ h2_ctx *ctx = h2_ctx_rget(r);
+ h2_task *task;
+
+ if (strcmp(r->handler, "http2-status")) {
+ return DECLINED;
+ }
+ if (r->method_number != M_GET) {
+ return DECLINED;
+ }
+
+ task = ctx? h2_ctx_get_task(ctx) : NULL;
+ if (task) {
+ /* We need to handle the actual output on the main thread, as
+ * we need to access h2_session information. */
+ apr_table_setn(r->notes, H2_RESP_SOS_NOTE, H2_SOS_H2_STATUS);
+ apr_table_setn(r->headers_out, "Content-Type", "application/json");
+ r->status = 200;
+ return DONE;
+ }
+ return DECLINED;
+}
+
+static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...)
+{
+ va_list args;
+ apr_status_t rv;
+
+ va_start(args, fmt);
+ rv = apr_brigade_vprintf(bb, NULL, NULL, fmt, args);
+ va_end(args);
+
+ return rv;
+}
+
+static apr_status_t h2_status_stream_filter(h2_stream *stream)
+{
+ h2_session *session = stream->session;
+ h2_mplx *mplx = session->mplx;
+ conn_rec *c = session->c;
+ h2_push_diary *diary;
+ apr_bucket_brigade *bb;
+ apr_status_t status;
+
+ if (!stream->response) {
+ return APR_EINVAL;
+ }
+
+ if (!stream->buffer) {
+ stream->buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
+ }
+ bb = stream->buffer;
+
+ apr_table_unset(stream->response->headers, "Content-Length");
+ stream->response->content_length = -1;
+
+ bbout(bb, "{\n");
+ bbout(bb, " \"HTTP2\": \"on\",\n");
+ bbout(bb, " \"H2PUSH\": \"%s\",\n", h2_session_push_enabled(session)? "on" : "off");
+ bbout(bb, " \"mod_http2_version\": \"%s\",\n", MOD_HTTP2_VERSION);
+ bbout(bb, " \"session_id\": %ld,\n", (long)session->id);
+ bbout(bb, " \"streams_max\": %d,\n", (int)session->max_stream_count);
+ bbout(bb, " \"this_stream\": %d,\n", stream->id);
+ bbout(bb, " \"streams_open\": %d,\n", (int)h2_ihash_count(session->streams));
+ bbout(bb, " \"max_stream_started\": %d,\n", mplx->max_stream_started);
+ bbout(bb, " \"requests_received\": %d,\n", session->remote.emitted_count);
+ bbout(bb, " \"responses_submitted\": %d,\n", session->responses_submitted);
+ bbout(bb, " \"streams_reset\": %d, \n", session->streams_reset);
+ bbout(bb, " \"pushes_promised\": %d,\n", session->pushes_promised);
+ bbout(bb, " \"pushes_submitted\": %d,\n", session->pushes_submitted);
+ bbout(bb, " \"pushes_reset\": %d,\n", session->pushes_reset);
+
+ diary = session->push_diary;
+ if (diary) {
+ const char *data;
+ const char *base64_digest;
+ apr_size_t len;
+
+ status = h2_push_diary_digest_get(diary, stream->pool, 256,
+ stream->request->authority, &data, &len);
+ if (status == APR_SUCCESS) {
+ base64_digest = h2_util_base64url_encode(data, len, stream->pool);
+ bbout(bb, " \"cache_digest\": \"%s\",\n", base64_digest);
+ }
+
+ /* try the reverse for testing purposes */
+ status = h2_push_diary_digest_set(diary, stream->request->authority, data, len);
+ if (status == APR_SUCCESS) {
+ status = h2_push_diary_digest_get(diary, stream->pool, 256,
+ stream->request->authority, &data, &len);
+ if (status == APR_SUCCESS) {
+ base64_digest = h2_util_base64url_encode(data, len, stream->pool);
+ bbout(bb, " \"cache_digest^2\": \"%s\",\n", base64_digest);
+ }
+ }
+ }
+ bbout(bb, " \"frames_received\": %ld,\n", (long)session->frames_received);
+ bbout(bb, " \"frames_sent\": %ld,\n", (long)session->frames_sent);
+ bbout(bb, " \"bytes_received\": %"APR_UINT64_T_FMT",\n", session->io.bytes_read);
+ bbout(bb, " \"bytes_sent\": %"APR_UINT64_T_FMT"\n", session->io.bytes_written);
+ bbout(bb, "}\n");
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_stream_filter(h2_stream *stream)
+{
+ const char *fname = stream->response? stream->response->sos_filter : NULL;
+ if (fname && !strcmp(H2_SOS_H2_STATUS, fname)) {
+ return h2_status_stream_filter(stream);
+ }
+ return APR_SUCCESS;
+}
+
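For illustration only (not part of the patch): the rough shape of the JSON document the status handler above emits; the keys follow the bbout() calls, all values below are made up.

/*
 * {
 *   "HTTP2": "on",
 *   "H2PUSH": "off",
 *   "mod_http2_version": "1.x.y",
 *   "session_id": 42,
 *   "streams_max": 100,
 *   "this_stream": 7,
 *   ...
 *   "frames_received": 123,
 *   "frames_sent": 456,
 *   "bytes_received": 7890,
 *   "bytes_sent": 12345
 * }
 */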
diff --git a/modules/http2/h2_filter.h b/modules/http2/h2_filter.h
new file mode 100644
index 00000000..5ba7d158
--- /dev/null
+++ b/modules/http2/h2_filter.h
@@ -0,0 +1,51 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_filter__
+#define __mod_h2__h2_filter__
+
+struct h2_stream;
+struct h2_session;
+
+typedef apr_status_t h2_filter_cin_cb(void *ctx,
+ const char *data, apr_size_t len,
+ apr_size_t *readlen);
+
+typedef struct h2_filter_cin {
+ apr_pool_t *pool;
+ apr_bucket_brigade *bb;
+ h2_filter_cin_cb *cb;
+ void *cb_ctx;
+ apr_socket_t *socket;
+ apr_interval_time_t timeout;
+ apr_time_t start_read;
+} h2_filter_cin;
+
+h2_filter_cin *h2_filter_cin_create(apr_pool_t *p, h2_filter_cin_cb *cb, void *ctx);
+
+void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout);
+
+apr_status_t h2_filter_core_input(ap_filter_t* filter,
+ apr_bucket_brigade* brigade,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes);
+
+#define H2_RESP_SOS_NOTE "h2-sos-filter"
+
+apr_status_t h2_stream_filter(struct h2_stream *stream);
+int h2_filter_h2_status_handler(request_rec *r);
+
+#endif /* __mod_h2__h2_filter__ */
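For illustration only (not part of the patch): a sketch of how the connection input filter registered as "H2_IN" (see h2_conn.c) could be wired up with a feed callback; the callback body and function names are hypothetical.

/* sketch: feed callback that consumes raw connection bytes */
static apr_status_t example_feed(void *ctx, const char *data,
                                 apr_size_t len, apr_size_t *readlen)
{
    /* hand the bytes to the HTTP/2 frame parser here */
    *readlen = len;                   /* report how much was consumed */
    return APR_SUCCESS;
}

/* sketch: install the filter on a master connection */
static void example_install_h2_in(conn_rec *c, apr_pool_t *pool)
{
    h2_filter_cin *cin = h2_filter_cin_create(pool, example_feed, NULL);
    h2_filter_cin_timeout_set(cin, apr_time_from_sec(5));
    ap_add_input_filter("H2_IN", cin, NULL, c);
}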
diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c
new file mode 100644
index 00000000..0f893ec1
--- /dev/null
+++ b/modules/http2/h2_from_h1.c
@@ -0,0 +1,589 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_lib.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <util_time.h>
+
+#include "h2_private.h"
+#include "h2_response.h"
+#include "h2_from_h1.h"
+#include "h2_task.h"
+#include "h2_util.h"
+
+
+static void set_state(h2_from_h1 *from_h1, h2_from_h1_state_t state);
+
+h2_from_h1 *h2_from_h1_create(int stream_id, apr_pool_t *pool)
+{
+ h2_from_h1 *from_h1 = apr_pcalloc(pool, sizeof(h2_from_h1));
+ if (from_h1) {
+ from_h1->stream_id = stream_id;
+ from_h1->pool = pool;
+ from_h1->state = H2_RESP_ST_STATUS_LINE;
+ from_h1->hlines = apr_array_make(pool, 10, sizeof(char *));
+ }
+ return from_h1;
+}
+
+static void set_state(h2_from_h1 *from_h1, h2_from_h1_state_t state)
+{
+ if (from_h1->state != state) {
+ from_h1->state = state;
+ }
+}
+
+h2_response *h2_from_h1_get_response(h2_from_h1 *from_h1)
+{
+ return from_h1->response;
+}
+
+static apr_status_t make_h2_headers(h2_from_h1 *from_h1, request_rec *r)
+{
+ from_h1->response = h2_response_create(from_h1->stream_id, 0,
+ from_h1->http_status,
+ from_h1->hlines,
+ r->notes,
+ from_h1->pool);
+ from_h1->content_length = from_h1->response->content_length;
+ from_h1->chunked = r->chunked;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection, APLOGNO(03197)
+ "h2_from_h1(%d): converted headers, content-length: %d"
+ ", chunked=%d",
+ from_h1->stream_id, (int)from_h1->content_length,
+ (int)from_h1->chunked);
+
+ set_state(from_h1, ((from_h1->chunked || from_h1->content_length > 0)?
+ H2_RESP_ST_BODY : H2_RESP_ST_DONE));
+ /* We are ready to be sent to the client */
+ return APR_SUCCESS;
+}
+
+static apr_status_t parse_header(h2_from_h1 *from_h1, ap_filter_t* f,
+ char *line) {
+ (void)f;
+
+ if (line[0] == ' ' || line[0] == '\t') {
+ char **plast;
+ /* continuation line from the header before this */
+ while (line[0] == ' ' || line[0] == '\t') {
+ ++line;
+ }
+
+ plast = apr_array_pop(from_h1->hlines);
+ if (plast == NULL) {
+ /* not well formed */
+ return APR_EINVAL;
+ }
+ APR_ARRAY_PUSH(from_h1->hlines, const char*) = apr_psprintf(from_h1->pool, "%s %s", *plast, line);
+ }
+ else {
+ /* new header line */
+ APR_ARRAY_PUSH(from_h1->hlines, const char*) = apr_pstrdup(from_h1->pool, line);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t get_line(h2_from_h1 *from_h1, apr_bucket_brigade *bb,
+ ap_filter_t* f, char *line, apr_size_t len)
+{
+ apr_status_t status;
+ if (!from_h1->bb) {
+ from_h1->bb = apr_brigade_create(from_h1->pool, f->c->bucket_alloc);
+ }
+ else {
+ apr_brigade_cleanup(from_h1->bb);
+ }
+ status = apr_brigade_split_line(from_h1->bb, bb,
+ APR_BLOCK_READ,
+ HUGE_STRING_LEN);
+ if (status == APR_SUCCESS) {
+ --len;
+ status = apr_brigade_flatten(from_h1->bb, line, &len);
+ if (status == APR_SUCCESS) {
+ /* we assume a non-empty line and remove the
+ * trailing CRLF. */
+ line[len] = '\0';
+ if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
+ len -= 2;
+ line[len] = '\0';
+ }
+
+ apr_brigade_cleanup(from_h1->bb);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_from_h1(%d): read line: %s",
+ from_h1->stream_id, line);
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_from_h1_read_response(h2_from_h1 *from_h1, ap_filter_t* f,
+ apr_bucket_brigade* bb)
+{
+ apr_status_t status = APR_SUCCESS;
+ char line[HUGE_STRING_LEN];
+
+ if ((from_h1->state == H2_RESP_ST_BODY)
+ || (from_h1->state == H2_RESP_ST_DONE)) {
+ if (from_h1->chunked) {
+ /* The httpd core HTTP_HEADER filter has installed, or will install, the
+ * "CHUNK" output transcode filter, which appears further down
+ * the filter chain. We do not want it for HTTP/2.
+ * Once it has been removed successfully, this filter has no
+ * further function and we remove ourselves as well.
+ */
+ status = ap_remove_output_filter_byhandle(f->r->output_filters,
+ "CHUNK");
+ if (status == APR_SUCCESS) {
+ ap_remove_output_filter(f);
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_from_h1(%d): read_response", from_h1->stream_id);
+
+ while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+
+ switch (from_h1->state) {
+
+ case H2_RESP_ST_STATUS_LINE:
+ case H2_RESP_ST_HEADERS:
+ status = get_line(from_h1, bb, f, line, sizeof(line));
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ if (from_h1->state == H2_RESP_ST_STATUS_LINE) {
+ /* instead of parsing, just take it directly */
+ from_h1->http_status = f->r->status;
+ from_h1->state = H2_RESP_ST_HEADERS;
+ }
+ else if (line[0] == '\0') {
+ /* end of headers, create the h2_response and
+ * pass the rest of the brigade down the filter
+ * chain.
+ */
+ status = make_h2_headers(from_h1, f->r);
+ if (from_h1->bb) {
+ apr_brigade_destroy(from_h1->bb);
+ from_h1->bb = NULL;
+ }
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ return ap_pass_brigade(f->next, bb);
+ }
+ }
+ else {
+ status = parse_header(from_h1, f, line);
+ }
+ break;
+
+ default:
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ }
+
+ return status;
+}
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ (void)key;
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+ * Could be replaced by a ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
+ */
+static void fix_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do((int (*)(void *, const char *, const char *))uniq_field_values,
+ (void *) varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
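For illustration only (not part of the patch): a worked example of what fix_vary() above does.

/* Given headers_out carrying
 *   Vary: Accept-Encoding, User-Agent
 *   Vary: accept-encoding
 * fix_vary() collapses them into the single field
 *   Vary: Accept-Encoding,User-Agent
 * dropping the case-insensitive duplicate token. */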
+
+void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool)
+{
+ char *date = NULL;
+ const char *proxy_date = NULL;
+ const char *server = NULL;
+ const char *us = ap_get_server_banner();
+
+ /*
+ * keep the set-by-proxy server and date headers, otherwise
+ * generate a new server header / date header
+ */
+ if (r && r->proxyreq != PROXYREQ_NONE) {
+ proxy_date = apr_table_get(r->headers_out, "Date");
+ if (!proxy_date) {
+ /*
+ * proxy_date needs to be const. So use date for the creation of
+ * our own Date header and pass it over to proxy_date later to
+ * avoid a compiler warning.
+ */
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ }
+ server = apr_table_get(r->headers_out, "Server");
+ }
+ else {
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r? r->request_time : apr_time_now());
+ }
+
+ apr_table_setn(headers, "Date", proxy_date ? proxy_date : date );
+ if (r) {
+ apr_table_unset(r->headers_out, "Date");
+ }
+
+ if (!server && *us) {
+ server = us;
+ }
+ if (server) {
+ apr_table_setn(headers, "Server", server);
+ if (r) {
+ apr_table_unset(r->headers_out, "Server");
+ }
+ }
+}
+
+static int copy_header(void *ctx, const char *name, const char *value)
+{
+ apr_table_t *headers = ctx;
+
+ apr_table_addn(headers, name, value);
+ return 1;
+}
+
+static h2_response *create_response(h2_from_h1 *from_h1, request_rec *r)
+{
+ const char *clheader;
+ const char *ctype;
+ apr_table_t *headers;
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ apr_table_clear(r->err_headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fix_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ ap_set_keepalive(r);
+
+ if (r->chunked) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ ctype = ap_make_content_type(r, r->content_type);
+ if (ctype) {
+ apr_table_setn(r->headers_out, "Content-Type", ctype);
+ }
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ unsigned int i;
+ char *token;
+ char **languages = (char **)(r->content_languages->elts);
+ const char *field = apr_table_get(r->headers_out, "Content-Language");
+
+ while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ if (!apr_strnatcasecmp(token, languages[i]))
+ break;
+ }
+ if (i == r->content_languages->nelts) {
+ *((char **) apr_array_push(r->content_languages)) = token;
+ }
+ }
+
+ field = apr_array_pstrcat(r->pool, r->content_languages, ',');
+ apr_table_setn(r->headers_out, "Content-Language", field);
+ }
+
+ /*
+ * Control cacheability for non-cacheable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_addn(r->headers_out, "Expires", date);
+ }
+
+ /* This is a hack, but I can't find any way around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+ * because well behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ headers = apr_table_make(r->pool, 10);
+
+ h2_from_h1_set_basic_http_header(headers, r, r->pool);
+ if (r->status == HTTP_NOT_MODIFIED) {
+ apr_table_do((int (*)(void *, const char *, const char *)) copy_header,
+ (void *) headers, r->headers_out,
+ "ETag",
+ "Content-Location",
+ "Expires",
+ "Cache-Control",
+ "Vary",
+ "Warning",
+ "WWW-Authenticate",
+ "Proxy-Authenticate",
+ "Set-Cookie",
+ "Set-Cookie2",
+ NULL);
+ }
+ else {
+ apr_table_do((int (*)(void *, const char *, const char *)) copy_header,
+ (void *) headers, r->headers_out, NULL);
+ }
+
+ return h2_response_rcreate(from_h1->stream_id, r, headers, r->pool);
+}
+
+apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_task *task = f->ctx;
+ h2_from_h1 *from_h1 = task->output.from_h1;
+ request_rec *r = f->r;
+ apr_bucket *b;
+ ap_bucket_error *eb = NULL;
+
+ AP_DEBUG_ASSERT(from_h1 != NULL);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_from_h1(%d): output_filter called", from_h1->stream_id);
+
+ if (r->header_only && from_h1->response) {
+ /* throw away any data after we have compiled the response */
+ apr_brigade_cleanup(bb);
+ return OK;
+ }
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (AP_BUCKET_IS_ERROR(b) && !eb) {
+ eb = b->data;
+ continue;
+ }
+ /*
+ * If we see an EOC bucket it is a signal that we should get out
+         * of the way and do nothing further.
+ */
+ if (AP_BUCKET_IS_EOC(b)) {
+ ap_remove_output_filter(f);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
+ "h2_from_h1(%d): eoc bucket passed",
+ from_h1->stream_id);
+ return ap_pass_brigade(f->next, bb);
+ }
+ }
+
+ if (eb) {
+ int st = eb->status;
+ apr_brigade_cleanup(bb);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047)
+ "h2_from_h1(%d): err bucket status=%d",
+ from_h1->stream_id, st);
+ ap_die(st, r);
+ return AP_FILTER_ERROR;
+ }
+
+ from_h1->response = create_response(from_h1, r);
+ if (from_h1->response == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048)
+ "h2_from_h1(%d): unable to create response",
+ from_h1->stream_id);
+ return APR_ENOMEM;
+ }
+
+ if (r->header_only) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_from_h1(%d): header_only, cleanup output brigade",
+ from_h1->stream_id);
+ apr_brigade_cleanup(bb);
+ return OK;
+ }
+
+ r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
+
+ ap_remove_output_filter(f);
+ if (APLOGctrace1(f->c)) {
+ apr_off_t len = 0;
+ apr_brigade_length(bb, 0, &len);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_from_h1(%d): removed header filter, passing brigade "
+ "len=%ld", from_h1->stream_id, (long)len);
+ }
+ return ap_pass_brigade(f->next, bb);
+}
+
+apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_task *task = f->ctx;
+ h2_from_h1 *from_h1 = task->output.from_h1;
+ request_rec *r = f->r;
+ apr_bucket *b;
+
+ if (from_h1 && from_h1->response) {
+ /* Detect the EOR bucket and forward any trailers that may have
+ * been set to our h2_response.
+ */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (AP_BUCKET_IS_EOR(b)) {
+ /* FIXME: need a better test case than this.
+ apr_table_setn(r->trailers_out, "X", "1"); */
+ if (r->trailers_out && !apr_is_empty_table(r->trailers_out)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049)
+ "h2_from_h1(%d): trailers filter, saving trailers",
+ from_h1->stream_id);
+ h2_response_set_trailers(from_h1->response,
+ apr_table_clone(from_h1->pool,
+ r->trailers_out));
+ }
+ break;
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
diff --git a/modules/http2/h2_from_h1.h b/modules/http2/h2_from_h1.h
new file mode 100644
index 00000000..71cc35fa
--- /dev/null
+++ b/modules/http2/h2_from_h1.h
@@ -0,0 +1,75 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_from_h1__
+#define __mod_h2__h2_from_h1__
+
+/**
+ * h2_from_h1 parses a HTTP/1.1 response into
+ * - response status
+ * - a list of header values
+ * - a series of bytes that represent the response body alone, without
+ *   any metadata such as that inserted by chunked transfer encoding.
+ *
+ * All data is allocated from the stream memory pool.
+ *
+ * Again, see comments in h2_request: ideally we would take the headers
+ * and status from the httpd structures instead of parsing them here, but
+ * we need to have all handlers and filters involved in request/response
+ * processing, so this seems to be the way for now.
+ */
+
+typedef enum {
+ H2_RESP_ST_STATUS_LINE, /* parsing http/1 status line */
+ H2_RESP_ST_HEADERS, /* parsing http/1 response headers */
+ H2_RESP_ST_BODY, /* transferring response body */
+ H2_RESP_ST_DONE /* complete response converted */
+} h2_from_h1_state_t;
+
+struct h2_response;
+
+typedef struct h2_from_h1 h2_from_h1;
+
+struct h2_from_h1 {
+ int stream_id;
+ h2_from_h1_state_t state;
+ apr_pool_t *pool;
+ apr_bucket_brigade *bb;
+
+ apr_off_t content_length;
+ int chunked;
+
+ int http_status;
+ apr_array_header_t *hlines;
+
+ struct h2_response *response;
+};
+
+
+h2_from_h1 *h2_from_h1_create(int stream_id, apr_pool_t *pool);
+
+apr_status_t h2_from_h1_read_response(h2_from_h1 *from_h1,
+ ap_filter_t* f, apr_bucket_brigade* bb);
+
+struct h2_response *h2_from_h1_get_response(h2_from_h1 *from_h1);
+
+apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb);
+
+apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb);
+
+void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool);
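+
+/*
+ * Typical call sequence (an illustrative sketch only; the pool, filter and
+ * brigade variables below are assumptions, not names used by the module):
+ *
+ *     h2_from_h1 *fh1 = h2_from_h1_create(stream_id, stream_pool);
+ *     // feed serialized HTTP/1 response data as the handler produces it
+ *     status = h2_from_h1_read_response(fh1, filter, brigade);
+ *     // once status line and headers have been parsed, fetch the result
+ *     struct h2_response *response = h2_from_h1_get_response(fh1);
+ */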
+
+#endif /* defined(__mod_h2__h2_from_h1__) */
diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c
new file mode 100644
index 00000000..825cd77e
--- /dev/null
+++ b/modules/http2/h2_h2.c
@@ -0,0 +1,714 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+
+#include "mod_ssl.h"
+
+#include "mod_http2.h"
+#include "h2_private.h"
+
+#include "h2_stream.h"
+#include "h2_task.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_conn.h"
+#include "h2_request.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_h2.h"
+#include "mod_http2.h"
+
+const char *h2_tls_protos[] = {
+ "h2", NULL
+};
+
+const char *h2_clear_protos[] = {
+ "h2c", NULL
+};
+
+const char *H2_MAGIC_TOKEN = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
+
+/*******************************************************************************
+ * The optional mod_ssl functions we need.
+ */
+static APR_OPTIONAL_FN_TYPE(ssl_engine_disable) *opt_ssl_engine_disable;
+static APR_OPTIONAL_FN_TYPE(ssl_is_https) *opt_ssl_is_https;
+static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *opt_ssl_var_lookup;
+
+
+/*******************************************************************************
+ * HTTP/2 error stuff
+ */
+static const char *h2_err_descr[] = {
+ "no error", /* 0x0 */
+ "protocol error",
+ "internal error",
+ "flow control error",
+ "settings timeout",
+ "stream closed", /* 0x5 */
+ "frame size error",
+ "refused stream",
+ "cancel",
+ "compression error",
+ "connect error", /* 0xa */
+ "enhance your calm",
+ "inadequate security",
+ "http/1.1 required",
+};
+
+const char *h2_h2_err_description(unsigned int h2_error)
+{
+ if (h2_error < (sizeof(h2_err_descr)/sizeof(h2_err_descr[0]))) {
+ return h2_err_descr[h2_error];
+ }
+ return "unknown http/2 error code";
+}
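+
+/* A minimal usage sketch (not referenced by the module itself): combine the
+ * bounds-checked lookup above with the numeric code when composing log
+ * output. The buffer handling here is an assumption made for this example. */
+static void example_log_h2_error(char *buf, apr_size_t blen, unsigned int error)
+{
+    /* e.g. error 0x7 yields "h2 error 7 (refused stream)" */
+    apr_snprintf(buf, blen, "h2 error %u (%s)", error,
+                 h2_h2_err_description(error));
+}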
+
+/*******************************************************************************
+ * Check connection security requirements of RFC 7540
+ */
+
+/*
+ * Blacklisted ciphers from RFC 7540 Appendix A
+ *
+ */
+static const char *RFC7540_names[] = {
+    /* ciphers with NULL encryption */
+ "NULL-MD5", /* TLS_NULL_WITH_NULL_NULL */
+ /* same */ /* TLS_RSA_WITH_NULL_MD5 */
+ "NULL-SHA", /* TLS_RSA_WITH_NULL_SHA */
+ "NULL-SHA256", /* TLS_RSA_WITH_NULL_SHA256 */
+ "PSK-NULL-SHA", /* TLS_PSK_WITH_NULL_SHA */
+ "DHE-PSK-NULL-SHA", /* TLS_DHE_PSK_WITH_NULL_SHA */
+ "RSA-PSK-NULL-SHA", /* TLS_RSA_PSK_WITH_NULL_SHA */
+ "PSK-NULL-SHA256", /* TLS_PSK_WITH_NULL_SHA256 */
+ "PSK-NULL-SHA384", /* TLS_PSK_WITH_NULL_SHA384 */
+ "DHE-PSK-NULL-SHA256", /* TLS_DHE_PSK_WITH_NULL_SHA256 */
+ "DHE-PSK-NULL-SHA384", /* TLS_DHE_PSK_WITH_NULL_SHA384 */
+ "RSA-PSK-NULL-SHA256", /* TLS_RSA_PSK_WITH_NULL_SHA256 */
+ "RSA-PSK-NULL-SHA384", /* TLS_RSA_PSK_WITH_NULL_SHA384 */
+ "ECDH-ECDSA-NULL-SHA", /* TLS_ECDH_ECDSA_WITH_NULL_SHA */
+ "ECDHE-ECDSA-NULL-SHA", /* TLS_ECDHE_ECDSA_WITH_NULL_SHA */
+ "ECDH-RSA-NULL-SHA", /* TLS_ECDH_RSA_WITH_NULL_SHA */
+ "ECDHE-RSA-NULL-SHA", /* TLS_ECDHE_RSA_WITH_NULL_SHA */
+ "AECDH-NULL-SHA", /* TLS_ECDH_anon_WITH_NULL_SHA */
+ "ECDHE-PSK-NULL-SHA", /* TLS_ECDHE_PSK_WITH_NULL_SHA */
+ "ECDHE-PSK-NULL-SHA256", /* TLS_ECDHE_PSK_WITH_NULL_SHA256 */
+ "ECDHE-PSK-NULL-SHA384", /* TLS_ECDHE_PSK_WITH_NULL_SHA384 */
+
+ /* DES/3DES ciphers */
+ "PSK-3DES-EDE-CBC-SHA", /* TLS_PSK_WITH_3DES_EDE_CBC_SHA */
+ "DHE-PSK-3DES-EDE-CBC-SHA", /* TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA */
+ "RSA-PSK-3DES-EDE-CBC-SHA", /* TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA */
+ "ECDH-ECDSA-DES-CBC3-SHA", /* TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-ECDSA-DES-CBC3-SHA", /* TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDH-RSA-DES-CBC3-SHA", /* TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-RSA-DES-CBC3-SHA", /* TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA */
+ "AECDH-DES-CBC3-SHA", /* TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA */
+ "SRP-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA */
+ "SRP-RSA-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA */
+ "SRP-DSS-3DES-EDE-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA */
+ "ECDHE-PSK-3DES-EDE-CBC-SHA", /* TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA */
+ "DES-CBC-SHA", /* TLS_RSA_WITH_DES_CBC_SHA */
+ "DES-CBC3-SHA", /* TLS_RSA_WITH_3DES_EDE_CBC_SHA */
+ "DHE-DSS-DES-CBC3-SHA", /* TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA */
+ "DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_WITH_DES_CBC_SHA */
+ "DHE-RSA-DES-CBC3-SHA", /* TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA */
+ "ADH-DES-CBC-SHA", /* TLS_DH_anon_WITH_DES_CBC_SHA */
+ "ADH-DES-CBC3-SHA", /* TLS_DH_anon_WITH_3DES_EDE_CBC_SHA */
+ "EXP-DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA */
+ "DH-DSS-DES-CBC-SHA", /* TLS_DH_DSS_WITH_DES_CBC_SHA */
+ "DH-DSS-DES-CBC3-SHA", /* TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA */
+ "EXP-DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "DH-RSA-DES-CBC-SHA", /* TLS_DH_RSA_WITH_DES_CBC_SHA */
+ "DH-RSA-DES-CBC3-SHA", /* TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA */
+
+ /* blacklisted EXPORT ciphers */
+ "EXP-RC4-MD5", /* TLS_RSA_EXPORT_WITH_RC4_40_MD5 */
+ "EXP-RC2-CBC-MD5", /* TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 */
+ "EXP-DES-CBC-SHA", /* TLS_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-DHE-DSS-DES-CBC-SHA", /* TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-DHE-RSA-DES-CBC-SHA", /* TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-ADH-DES-CBC-SHA", /* TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA */
+ "EXP-ADH-RC4-MD5", /* TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 */
+
+ /* blacklisted RC4 encryption */
+ "RC4-MD5", /* TLS_RSA_WITH_RC4_128_MD5 */
+ "RC4-SHA", /* TLS_RSA_WITH_RC4_128_SHA */
+ "ADH-RC4-MD5", /* TLS_DH_anon_WITH_RC4_128_MD5 */
+ "KRB5-RC4-SHA", /* TLS_KRB5_WITH_RC4_128_SHA */
+ "KRB5-RC4-MD5", /* TLS_KRB5_WITH_RC4_128_MD5 */
+ "EXP-KRB5-RC4-SHA", /* TLS_KRB5_EXPORT_WITH_RC4_40_SHA */
+ "EXP-KRB5-RC4-MD5", /* TLS_KRB5_EXPORT_WITH_RC4_40_MD5 */
+ "PSK-RC4-SHA", /* TLS_PSK_WITH_RC4_128_SHA */
+ "DHE-PSK-RC4-SHA", /* TLS_DHE_PSK_WITH_RC4_128_SHA */
+ "RSA-PSK-RC4-SHA", /* TLS_RSA_PSK_WITH_RC4_128_SHA */
+ "ECDH-ECDSA-RC4-SHA", /* TLS_ECDH_ECDSA_WITH_RC4_128_SHA */
+ "ECDHE-ECDSA-RC4-SHA", /* TLS_ECDHE_ECDSA_WITH_RC4_128_SHA */
+ "ECDH-RSA-RC4-SHA", /* TLS_ECDH_RSA_WITH_RC4_128_SHA */
+ "ECDHE-RSA-RC4-SHA", /* TLS_ECDHE_RSA_WITH_RC4_128_SHA */
+ "AECDH-RC4-SHA", /* TLS_ECDH_anon_WITH_RC4_128_SHA */
+ "ECDHE-PSK-RC4-SHA", /* TLS_ECDHE_PSK_WITH_RC4_128_SHA */
+
+    /* blacklisted AES128 encryption ciphers */
+    "AES128-SHA",                    /* TLS_RSA_WITH_AES_128_CBC_SHA */
+ "DH-DSS-AES128-SHA", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA */
+ "DH-RSA-AES128-SHA", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA */
+ "DHE-DSS-AES128-SHA", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA */
+ "DHE-RSA-AES128-SHA", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA */
+ "ADH-AES128-SHA", /* TLS_DH_anon_WITH_AES_128_CBC_SHA */
+ "AES128-SHA256", /* TLS_RSA_WITH_AES_128_CBC_SHA256 */
+ "DH-DSS-AES128-SHA256", /* TLS_DH_DSS_WITH_AES_128_CBC_SHA256 */
+ "DH-RSA-AES128-SHA256", /* TLS_DH_RSA_WITH_AES_128_CBC_SHA256 */
+ "DHE-DSS-AES128-SHA256", /* TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 */
+ "DHE-RSA-AES128-SHA256", /* TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-ECDSA-AES128-SHA", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA */
+ "ECDHE-ECDSA-AES128-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA */
+ "ECDH-RSA-AES128-SHA", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA */
+ "ECDHE-RSA-AES128-SHA", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA */
+ "AECDH-AES128-SHA", /* TLS_ECDH_anon_WITH_AES_128_CBC_SHA */
+ "ECDHE-ECDSA-AES128-SHA256", /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-ECDSA-AES128-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 */
+ "ECDHE-RSA-AES128-SHA256", /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 */
+ "ECDH-RSA-AES128-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 */
+ "ADH-AES128-SHA256", /* TLS_DH_anon_WITH_AES_128_CBC_SHA256 */
+ "PSK-AES128-CBC-SHA", /* TLS_PSK_WITH_AES_128_CBC_SHA */
+ "DHE-PSK-AES128-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA */
+ "RSA-PSK-AES128-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA */
+ "PSK-AES128-CBC-SHA256", /* TLS_PSK_WITH_AES_128_CBC_SHA256 */
+ "DHE-PSK-AES128-CBC-SHA256", /* TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 */
+ "RSA-PSK-AES128-CBC-SHA256", /* TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 */
+ "ECDHE-PSK-AES128-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA */
+ "ECDHE-PSK-AES128-CBC-SHA256", /* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 */
+ "AES128-CCM", /* TLS_RSA_WITH_AES_128_CCM */
+ "AES128-CCM8", /* TLS_RSA_WITH_AES_128_CCM_8 */
+ "PSK-AES128-CCM", /* TLS_PSK_WITH_AES_128_CCM */
+ "PSK-AES128-CCM8", /* TLS_PSK_WITH_AES_128_CCM_8 */
+ "AES128-GCM-SHA256", /* TLS_RSA_WITH_AES_128_GCM_SHA256 */
+ "DH-RSA-AES128-GCM-SHA256", /* TLS_DH_RSA_WITH_AES_128_GCM_SHA256 */
+ "DH-DSS-AES128-GCM-SHA256", /* TLS_DH_DSS_WITH_AES_128_GCM_SHA256 */
+ "ADH-AES128-GCM-SHA256", /* TLS_DH_anon_WITH_AES_128_GCM_SHA256 */
+ "PSK-AES128-GCM-SHA256", /* TLS_PSK_WITH_AES_128_GCM_SHA256 */
+ "RSA-PSK-AES128-GCM-SHA256", /* TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 */
+ "ECDH-ECDSA-AES128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 */
+ "ECDH-RSA-AES128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 */
+ "SRP-AES-128-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_128_CBC_SHA */
+ "SRP-RSA-AES-128-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA */
+ "SRP-DSS-AES-128-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA */
+
+    /* blacklisted AES256 encryption ciphers */
+ "AES256-SHA", /* TLS_RSA_WITH_AES_256_CBC_SHA */
+ "DH-DSS-AES256-SHA", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA */
+ "DH-RSA-AES256-SHA", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA */
+ "DHE-DSS-AES256-SHA", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA */
+ "DHE-RSA-AES256-SHA", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA */
+ "ADH-AES256-SHA", /* TLS_DH_anon_WITH_AES_256_CBC_SHA */
+ "AES256-SHA256", /* TLS_RSA_WITH_AES_256_CBC_SHA256 */
+ "DH-DSS-AES256-SHA256", /* TLS_DH_DSS_WITH_AES_256_CBC_SHA256 */
+ "DH-RSA-AES256-SHA256", /* TLS_DH_RSA_WITH_AES_256_CBC_SHA256 */
+ "DHE-DSS-AES256-SHA256", /* TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 */
+ "DHE-RSA-AES256-SHA256", /* TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 */
+ "ADH-AES256-SHA256", /* TLS_DH_anon_WITH_AES_256_CBC_SHA256 */
+ "ECDH-ECDSA-AES256-SHA", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA */
+ "ECDHE-ECDSA-AES256-SHA", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA */
+ "ECDH-RSA-AES256-SHA", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA */
+ "ECDHE-RSA-AES256-SHA", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA */
+ "AECDH-AES256-SHA", /* TLS_ECDH_anon_WITH_AES_256_CBC_SHA */
+ "ECDHE-ECDSA-AES256-SHA384", /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 */
+ "ECDH-ECDSA-AES256-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 */
+ "ECDHE-RSA-AES256-SHA384", /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 */
+ "ECDH-RSA-AES256-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 */
+ "PSK-AES256-CBC-SHA", /* TLS_PSK_WITH_AES_256_CBC_SHA */
+ "DHE-PSK-AES256-CBC-SHA", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA */
+ "RSA-PSK-AES256-CBC-SHA", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA */
+ "PSK-AES256-CBC-SHA384", /* TLS_PSK_WITH_AES_256_CBC_SHA384 */
+ "DHE-PSK-AES256-CBC-SHA384", /* TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 */
+ "RSA-PSK-AES256-CBC-SHA384", /* TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 */
+ "ECDHE-PSK-AES256-CBC-SHA", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA */
+ "ECDHE-PSK-AES256-CBC-SHA384", /* TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 */
+ "SRP-AES-256-CBC-SHA", /* TLS_SRP_SHA_WITH_AES_256_CBC_SHA */
+ "SRP-RSA-AES-256-CBC-SHA", /* TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA */
+ "SRP-DSS-AES-256-CBC-SHA", /* TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA */
+ "AES256-CCM", /* TLS_RSA_WITH_AES_256_CCM */
+ "AES256-CCM8", /* TLS_RSA_WITH_AES_256_CCM_8 */
+ "PSK-AES256-CCM", /* TLS_PSK_WITH_AES_256_CCM */
+ "PSK-AES256-CCM8", /* TLS_PSK_WITH_AES_256_CCM_8 */
+ "AES256-GCM-SHA384", /* TLS_RSA_WITH_AES_256_GCM_SHA384 */
+ "DH-RSA-AES256-GCM-SHA384", /* TLS_DH_RSA_WITH_AES_256_GCM_SHA384 */
+ "DH-DSS-AES256-GCM-SHA384", /* TLS_DH_DSS_WITH_AES_256_GCM_SHA384 */
+ "ADH-AES256-GCM-SHA384", /* TLS_DH_anon_WITH_AES_256_GCM_SHA384 */
+ "PSK-AES256-GCM-SHA384", /* TLS_PSK_WITH_AES_256_GCM_SHA384 */
+ "RSA-PSK-AES256-GCM-SHA384", /* TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 */
+ "ECDH-ECDSA-AES256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 */
+ "ECDH-RSA-AES256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 */
+
+    /* blacklisted CAMELLIA128 encryption ciphers */
+ "CAMELLIA128-SHA", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "DH-DSS-CAMELLIA128-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA */
+ "DH-RSA-CAMELLIA128-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "DHE-DSS-CAMELLIA128-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA */
+ "DHE-RSA-CAMELLIA128-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA */
+ "ADH-CAMELLIA128-SHA", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA */
+ "ECDHE-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDH-ECDSA-CAMELLIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDHE-RSA-CAMELLIA128-SHA256", /* TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDH-RSA-CAMELLIA128-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "PSK-CAMELLIA128-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-PSK-CAMELLIA128-SHA256", /* TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "RSA-PSK-CAMELLIA128-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ECDHE-PSK-CAMELLIA128-SHA256", /* TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 */
+ "CAMELLIA128-GCM-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "DH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "DH-DSS-CAMELLIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ADH-CAMELLIA128-GCM-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ECDH-ECDSA-CAMELLIA128-GCM-SHA256",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "ECDH-RSA-CAMELLIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 */
+ "PSK-CAMELLIA128-GCM-SHA256", /* TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 */
+ "RSA-PSK-CAMELLIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 */
+ "CAMELLIA128-SHA256", /* TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DH-DSS-CAMELLIA128-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DH-RSA-CAMELLIA128-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-DSS-CAMELLIA128-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 */
+ "DHE-RSA-CAMELLIA128-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 */
+ "ADH-CAMELLIA128-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 */
+
+    /* blacklisted CAMELLIA256 encryption ciphers */
+ "CAMELLIA256-SHA", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "DH-RSA-CAMELLIA256-SHA", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "DH-DSS-CAMELLIA256-SHA", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA */
+ "DHE-DSS-CAMELLIA256-SHA", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA */
+ "DHE-RSA-CAMELLIA256-SHA", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA */
+ "ADH-CAMELLIA256-SHA", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA */
+ "ECDHE-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDH-ECDSA-CAMELLIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDHE-RSA-CAMELLIA256-SHA384", /* TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDH-RSA-CAMELLIA256-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 */
+ "PSK-CAMELLIA256-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "DHE-PSK-CAMELLIA256-SHA384", /* TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "RSA-PSK-CAMELLIA256-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "ECDHE-PSK-CAMELLIA256-SHA384", /* TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 */
+ "CAMELLIA256-SHA256", /* TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DH-DSS-CAMELLIA256-SHA256", /* TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DH-RSA-CAMELLIA256-SHA256", /* TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DHE-DSS-CAMELLIA256-SHA256", /* TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 */
+ "DHE-RSA-CAMELLIA256-SHA256", /* TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 */
+ "ADH-CAMELLIA256-SHA256", /* TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 */
+ "CAMELLIA256-GCM-SHA384", /* TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "DH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "DH-DSS-CAMELLIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ADH-CAMELLIA256-GCM-SHA384", /* TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ECDH-ECDSA-CAMELLIA256-GCM-SHA384",/* TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "ECDH-RSA-CAMELLIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 */
+ "PSK-CAMELLIA256-GCM-SHA384", /* TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 */
+ "RSA-PSK-CAMELLIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 */
+
+    /* The blacklisted ARIA encryption ciphers */
+ "ARIA128-SHA256", /* TLS_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ARIA256-SHA384", /* TLS_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "DH-DSS-ARIA128-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 */
+ "DH-DSS-ARIA256-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 */
+ "DH-RSA-ARIA128-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "DH-RSA-ARIA256-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-DSS-ARIA128-SHA256", /* TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-DSS-ARIA256-SHA384", /* TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-RSA-ARIA128-SHA256", /* TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-RSA-ARIA256-SHA384", /* TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ADH-ARIA128-SHA256", /* TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 */
+ "ADH-ARIA256-SHA384", /* TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 */
+ "ECDHE-ECDSA-ARIA128-SHA256", /* TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-ECDSA-ARIA256-SHA384", /* TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDH-ECDSA-ARIA128-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDH-ECDSA-ARIA256-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDHE-RSA-ARIA128-SHA256", /* TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-RSA-ARIA256-SHA384", /* TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ECDH-RSA-ARIA128-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 */
+ "ECDH-RSA-ARIA256-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 */
+ "ARIA128-GCM-SHA256", /* TLS_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "ARIA256-GCM-SHA384", /* TLS_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "DH-DSS-ARIA128-GCM-SHA256", /* TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 */
+ "DH-DSS-ARIA256-GCM-SHA384", /* TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 */
+ "DH-RSA-ARIA128-GCM-SHA256", /* TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "DH-RSA-ARIA256-GCM-SHA384", /* TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "ADH-ARIA128-GCM-SHA256", /* TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 */
+ "ADH-ARIA256-GCM-SHA384", /* TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 */
+ "ECDH-ECDSA-ARIA128-GCM-SHA256", /* TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 */
+ "ECDH-ECDSA-ARIA256-GCM-SHA384", /* TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 */
+ "ECDH-RSA-ARIA128-GCM-SHA256", /* TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 */
+ "ECDH-RSA-ARIA256-GCM-SHA384", /* TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 */
+ "PSK-ARIA128-SHA256", /* TLS_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "PSK-ARIA256-SHA384", /* TLS_PSK_WITH_ARIA_256_CBC_SHA384 */
+ "DHE-PSK-ARIA128-SHA256", /* TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "DHE-PSK-ARIA256-SHA384", /* TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 */
+ "RSA-PSK-ARIA128-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "RSA-PSK-ARIA256-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 */
+    "PSK-ARIA128-GCM-SHA256",         /* TLS_PSK_WITH_ARIA_128_GCM_SHA256 */
+    "PSK-ARIA256-GCM-SHA384",         /* TLS_PSK_WITH_ARIA_256_GCM_SHA384 */
+ "RSA-PSK-ARIA128-GCM-SHA256", /* TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 */
+ "RSA-PSK-ARIA256-GCM-SHA384", /* TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 */
+ "ECDHE-PSK-ARIA128-SHA256", /* TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 */
+ "ECDHE-PSK-ARIA256-SHA384", /* TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 */
+
+ /* blacklisted SEED encryptions */
+ "SEED-SHA", /*TLS_RSA_WITH_SEED_CBC_SHA */
+ "DH-DSS-SEED-SHA", /* TLS_DH_DSS_WITH_SEED_CBC_SHA */
+ "DH-RSA-SEED-SHA", /* TLS_DH_RSA_WITH_SEED_CBC_SHA */
+ "DHE-DSS-SEED-SHA", /* TLS_DHE_DSS_WITH_SEED_CBC_SHA */
+ "DHE-RSA-SEED-SHA", /* TLS_DHE_RSA_WITH_SEED_CBC_SHA */
+ "ADH-SEED-SHA", /* TLS_DH_anon_WITH_SEED_CBC_SHA */
+
+ /* blacklisted KRB5 ciphers */
+ "KRB5-DES-CBC-SHA", /* TLS_KRB5_WITH_DES_CBC_SHA */
+ "KRB5-DES-CBC3-SHA", /* TLS_KRB5_WITH_3DES_EDE_CBC_SHA */
+ "KRB5-IDEA-CBC-SHA", /* TLS_KRB5_WITH_IDEA_CBC_SHA */
+ "KRB5-DES-CBC-MD5", /* TLS_KRB5_WITH_DES_CBC_MD5 */
+ "KRB5-DES-CBC3-MD5", /* TLS_KRB5_WITH_3DES_EDE_CBC_MD5 */
+ "KRB5-IDEA-CBC-MD5", /* TLS_KRB5_WITH_IDEA_CBC_MD5 */
+ "EXP-KRB5-DES-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA */
+ "EXP-KRB5-DES-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 */
+ "EXP-KRB5-RC2-CBC-SHA", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA */
+ "EXP-KRB5-RC2-CBC-MD5", /* TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 */
+
+ /* blacklisted exoticas */
+ "DHE-DSS-CBC-SHA", /* TLS_DHE_DSS_WITH_DES_CBC_SHA */
+ "IDEA-CBC-SHA", /* TLS_RSA_WITH_IDEA_CBC_SHA */
+
+ /* not really sure if the following names are correct */
+ "SSL3_CK_SCSV", /* TLS_EMPTY_RENEGOTIATION_INFO_SCSV */
+ "SSL3_CK_FALLBACK_SCSV"
+};
+static size_t RFC7540_names_LEN = sizeof(RFC7540_names)/sizeof(RFC7540_names[0]);
+
+
+static apr_hash_t *BLCNames;
+
+static void cipher_init(apr_pool_t *pool)
+{
+ apr_hash_t *hash = apr_hash_make(pool);
+ const char *source;
+ unsigned int i;
+
+ source = "rfc7540";
+ for (i = 0; i < RFC7540_names_LEN; ++i) {
+ apr_hash_set(hash, RFC7540_names[i], APR_HASH_KEY_STRING, source);
+ }
+
+ BLCNames = hash;
+}
+
+static int cipher_is_blacklisted(const char *cipher, const char **psource)
+{
+ *psource = apr_hash_get(BLCNames, cipher, APR_HASH_KEY_STRING);
+ return !!*psource;
+}
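+
+/* A minimal usage sketch (not called by the module): build the blacklist
+ * hash once from a long-lived pool, then test the OpenSSL cipher name that
+ * mod_ssl reports for a connection. The two cipher names used here are
+ * example values only. */
+static int example_cipher_check(apr_pool_t *pool)
+{
+    const char *source = NULL;
+    cipher_init(pool);
+    /* a modern AEAD suite is not on the RFC 7540 blacklist -> 0 */
+    if (cipher_is_blacklisted("ECDHE-RSA-AES128-GCM-SHA256", &source)) {
+        return 1;
+    }
+    /* a static RSA CBC suite is blacklisted, *psource becomes "rfc7540" */
+    return cipher_is_blacklisted("AES256-SHA", &source);
+}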
+
+/*******************************************************************************
+ * Hooks for processing incoming connections:
+ * - process_conn take over connection in case of h2
+ */
+static int h2_h2_process_conn(conn_rec* c);
+static int h2_h2_pre_close_conn(conn_rec* c);
+static int h2_h2_post_read_req(request_rec *r);
+
+/*******************************************************************************
+ * Once per lifetime init, retrieve optional functions
+ */
+apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s)
+{
+ (void)pool;
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_h2, child_init");
+ opt_ssl_engine_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable);
+ opt_ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);
+ opt_ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup);
+
+ if (!opt_ssl_is_https || !opt_ssl_var_lookup) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+ APLOGNO(02951) "mod_ssl does not seem to be enabled");
+ }
+
+ cipher_init(pool);
+
+ return APR_SUCCESS;
+}
+
+int h2_h2_is_tls(conn_rec *c)
+{
+ return opt_ssl_is_https && opt_ssl_is_https(c);
+}
+
+int h2_is_acceptable_connection(conn_rec *c, int require_all)
+{
+ int is_tls = h2_h2_is_tls(c);
+ const h2_config *cfg = h2_config_get(c);
+
+ if (is_tls && h2_config_geti(cfg, H2_CONF_MODERN_TLS_ONLY) > 0) {
+ /* Check TLS connection for modern TLS parameters, as defined in
+ * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ */
+ apr_pool_t *pool = c->pool;
+ server_rec *s = c->base_server;
+ char *val;
+
+ if (!opt_ssl_var_lookup) {
+ /* unable to check */
+ return 0;
+ }
+
+        /* Need TLSv1.2 or higher, RFC 7540, ch. 9.2
+ */
+ val = opt_ssl_var_lookup(pool, s, c, NULL, (char*)"SSL_PROTOCOL");
+ if (val && *val) {
+ if (strncmp("TLS", val, 3)
+ || !strcmp("TLSv1", val)
+ || !strcmp("TLSv1.1", val)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050)
+ "h2_h2(%ld): tls protocol not suitable: %s",
+ (long)c->id, val);
+ return 0;
+ }
+ }
+ else if (require_all) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03051)
+ "h2_h2(%ld): tls protocol is indetermined", (long)c->id);
+ return 0;
+ }
+
+ /* Check TLS cipher blacklist
+ */
+ val = opt_ssl_var_lookup(pool, s, c, NULL, (char*)"SSL_CIPHER");
+ if (val && *val) {
+ const char *source;
+ if (cipher_is_blacklisted(val, &source)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052)
+ "h2_h2(%ld): tls cipher %s blacklisted by %s",
+ (long)c->id, val, source);
+ return 0;
+ }
+ }
+ else if (require_all) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053)
+ "h2_h2(%ld): tls cipher is indetermined", (long)c->id);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int h2_allows_h2_direct(conn_rec *c)
+{
+ const h2_config *cfg = h2_config_get(c);
+ int is_tls = h2_h2_is_tls(c);
+ const char *needed_protocol = is_tls? "h2" : "h2c";
+ int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT);
+
+ if (h2_direct < 0) {
+ h2_direct = is_tls? 0 : 1;
+ }
+ return (h2_direct
+ && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
+}
+
+int h2_allows_h2_upgrade(conn_rec *c)
+{
+ const h2_config *cfg = h2_config_get(c);
+ int h2_upgrade = h2_config_geti(cfg, H2_CONF_UPGRADE);
+
+ return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(c));
+}
+
+/*******************************************************************************
+ * Register various hooks
+ */
+static const char* const mod_ssl[] = { "mod_ssl.c", NULL};
+static const char* const mod_reqtimeout[] = { "mod_reqtimeout.c", NULL};
+
+void h2_h2_register_hooks(void)
+{
+ /* Our main processing needs to run quite late. Definitely after mod_ssl,
+ * as we need its connection filters, but also before reqtimeout as its
+ * method of timeouts is specific to HTTP/1.1 (as of now).
+     * The core HTTP/1 processing runs as REALLY_LAST, so we will have
+ * a chance to take over before it.
+ */
+ ap_hook_process_connection(h2_h2_process_conn,
+ mod_ssl, mod_reqtimeout, APR_HOOK_LAST);
+
+ /* One last chance to properly say goodbye if we have not done so
+ * already. */
+ ap_hook_pre_close_connection(h2_h2_pre_close_conn, NULL, mod_ssl, APR_HOOK_LAST);
+
+ /* With "H2SerializeHeaders On", we install the filter in this hook
+ * that parses the response. This needs to happen before any other post
+ * read function terminates the request with an error. Otherwise we will
+ * never see the response.
+ */
+ ap_hook_post_read_request(h2_h2_post_read_req, NULL, NULL, APR_HOOK_REALLY_FIRST);
+}
+
+int h2_h2_process_conn(conn_rec* c)
+{
+ apr_status_t status;
+ h2_ctx *ctx;
+
+ if (c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_ctx_get(c, 0);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
+ if (h2_ctx_is_task(ctx)) {
+ /* our stream pseudo connection */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined");
+ return DECLINED;
+ }
+
+ if (!ctx && c->keepalives == 0) {
+ const char *proto = ap_get_protocol(c);
+
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
+ "new connection using protocol '%s', direct=%d, "
+ "tls acceptable=%d", proto, h2_allows_h2_direct(c),
+ h2_is_acceptable_connection(c, 1));
+ }
+
+ if (!strcmp(AP_PROTOCOL_HTTP1, proto)
+ && h2_allows_h2_direct(c)
+ && h2_is_acceptable_connection(c, 1)) {
+            /* The fresh connection is still on HTTP/1.1, H2Direct is enabled
+             * and the connection is otherwise in a fully acceptable state.
+ * -> peek at the first 24 incoming bytes
+ */
+ apr_bucket_brigade *temp;
+ char *s = NULL;
+ apr_size_t slen;
+
+ temp = apr_brigade_create(c->pool, c->bucket_alloc);
+ status = ap_get_brigade(c->input_filters, temp,
+ AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24);
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054)
+ "h2_h2, error reading 24 bytes speculative");
+ apr_brigade_destroy(temp);
+ return DECLINED;
+ }
+
+ apr_brigade_pflatten(temp, &s, &slen, c->pool);
+ if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, direct mode detected");
+ if (!ctx) {
+ ctx = h2_ctx_get(c, 1);
+ }
+ h2_ctx_protocol_set(ctx, h2_h2_is_tls(c)? "h2" : "h2c");
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_h2, not detected in %d bytes: %s",
+ (int)slen, s);
+ }
+
+ apr_brigade_destroy(temp);
+ }
+ }
+
+ if (ctx) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
+ if (!h2_ctx_session_get(ctx)) {
+ status = h2_conn_setup(ctx, c, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ return h2_conn_run(ctx, c);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined");
+ return DECLINED;
+}
+
+static int h2_h2_pre_close_conn(conn_rec *c)
+{
+ h2_ctx *ctx;
+
+ /* slave connection? */
+ if (c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_ctx_get(c, 0);
+ if (ctx) {
+        /* If the session has already been closed correctly, we will not
+         * find an h2_ctx here. Its presence indicates that the session
+ * is still ongoing. */
+ return h2_conn_pre_close(ctx, c);
+ }
+ return DECLINED;
+}
+
+static int h2_h2_post_read_req(request_rec *r)
+{
+ /* slave connection? */
+ if (r->connection->master) {
+ h2_ctx *ctx = h2_ctx_rget(r);
+ struct h2_task *task = h2_ctx_get_task(ctx);
+ /* This hook will get called twice on internal redirects. Take care
+ * that we manipulate filters only once. */
+ if (task && !task->filters_set) {
+ ap_filter_t *f;
+
+            /* set up the correct output filters to process the response
+             * in the proper mod_http2 way. */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "adding task output filter");
+ if (task->ser_headers) {
+ ap_add_output_filter("H1_TO_H2_RESP", task, r, r->connection);
+ }
+ else {
+ /* replace the core http filter that formats response headers
+ * in HTTP/1 with our own that collects status and headers */
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+ ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
+ }
+
+ /* trailers processing. Incoming trailers are added to this
+ * request via our h2 input filter, outgoing trailers
+ * in a special h2 out filter. */
+ for (f = r->input_filters; f; f = f->next) {
+ if (!strcmp("H2_TO_H1", f->frec->name)) {
+ f->r = r;
+ break;
+ }
+ }
+ ap_add_output_filter("H2_TRAILERS", task, r, r->connection);
+ task->filters_set = 1;
+ }
+ }
+ return DECLINED;
+}
+
diff --git a/modules/http2/h2_h2.h b/modules/http2/h2_h2.h
new file mode 100644
index 00000000..592001e9
--- /dev/null
+++ b/modules/http2/h2_h2.h
@@ -0,0 +1,78 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_h2__
+#define __mod_h2__h2_h2__
+
+/**
+ * List of ALPN protocol identifiers that we support in cleartext
+ * negotiations. NULL terminated.
+ */
+extern const char *h2_clear_protos[];
+
+/**
+ * List of ALPN protocol identifiers that we support in TLS encrypted
+ * negotiations. NULL terminated.
+ */
+extern const char *h2_tls_protos[];
+
+/**
+ * Provide a user-readable description of the HTTP/2 error code.
+ * @param h2_error http/2 error code, as in RFC 7540, ch. 7
+ * @return textual description of code or that it is unknown.
+ */
+const char *h2_h2_err_description(unsigned int h2_error);
+
+/*
+ * One-time, post-config initialization.
+ */
+apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s);
+
+/* Is the connection a TLS connection?
+ */
+int h2_h2_is_tls(conn_rec *c);
+
+/* Register apache hooks for h2 protocol
+ */
+void h2_h2_register_hooks(void);
+
+/**
+ * Check if the given connection fulfills the requirements as configured.
+ * @param c the connection
+ * @param require_all != 0 iff any missing connection properties make
+ * the test fail. For example, a cipher might not have been selected while
+ * the handshake is still ongoing.
+ * @return != 0 iff connection requirements are met
+ */
+int h2_is_acceptable_connection(conn_rec *c, int require_all);
+
+/**
+ * Check if the "direct" HTTP/2 mode of protocol handling is enabled
+ * for the given connection.
+ * @param c the connection to check
+ * @return != 0 iff direct mode is enabled
+ */
+int h2_allows_h2_direct(conn_rec *c);
+
+/**
+ * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled
+ * for the given connection.
+ * @param c the connection to check
+ * @return != 0 iff Upgrade switching is enabled
+ */
+int h2_allows_h2_upgrade(conn_rec *c);
+
+
+#endif /* defined(__mod_h2__h2_h2__) */
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
new file mode 100644
index 00000000..001eb7f6
--- /dev/null
+++ b/modules/http2/h2_mplx.c
@@ -0,0 +1,1458 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include "mod_http2.h"
+
+#include "h2_private.h"
+#include "h2_bucket_beam.h"
+#include "h2_config.h"
+#include "h2_conn.h"
+#include "h2_ctx.h"
+#include "h2_h2.h"
+#include "h2_response.h"
+#include "h2_mplx.h"
+#include "h2_ngn_shed.h"
+#include "h2_request.h"
+#include "h2_stream.h"
+#include "h2_task.h"
+#include "h2_worker.h"
+#include "h2_workers.h"
+#include "h2_util.h"
+
+
+static void h2_beam_log(h2_bucket_beam *beam, int id, const char *msg,
+ conn_rec *c, int level)
+{
+ if (beam && APLOG_C_IS_LEVEL(c,level)) {
+ char buffer[2048];
+ apr_size_t off = 0;
+
+ off += apr_snprintf(buffer+off, H2_ALEN(buffer)-off, "cl=%d, ", beam->closed);
+ off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "red", ", ", &beam->red);
+ off += h2_util_bb_print(buffer+off, H2_ALEN(buffer)-off, "green", ", ", beam->green);
+ off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "hold", ", ", &beam->hold);
+ off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "purge", "", &beam->purge);
+
+ ap_log_cerror(APLOG_MARK, level, 0, c, "beam(%ld-%d): %s %s",
+ c->id, id, msg, buffer);
+ }
+}
+
+/* utility for iterating over ihash task sets */
+typedef struct {
+ h2_mplx *m;
+ h2_task *task;
+ apr_time_t now;
+} task_iter_ctx;
+
+/* NULL or the mutex held by this thread, used for recursive calls
+ */
+static apr_threadkey_t *thread_lock;
+
+apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
+{
+ return apr_threadkey_private_create(&thread_lock, NULL, pool);
+}
+
+static apr_status_t enter_mutex(h2_mplx *m, int *pacquired)
+{
+ apr_status_t status;
+ void *mutex = NULL;
+
+ /* Enter the mutex if this thread already holds the lock or
+     * if we can acquire it. Only in the latter case do we unlock
+     * on leaving the mutex.
+     * This allows recursive entering of the mutex from the same thread,
+     * which is what we need in certain situations involving callbacks.
+ */
+ AP_DEBUG_ASSERT(m);
+ apr_threadkey_private_get(&mutex, thread_lock);
+ if (mutex == m->lock) {
+ *pacquired = 0;
+ return APR_SUCCESS;
+ }
+
+ AP_DEBUG_ASSERT(m->lock);
+ status = apr_thread_mutex_lock(m->lock);
+ *pacquired = (status == APR_SUCCESS);
+ if (*pacquired) {
+ apr_threadkey_private_set(m->lock, thread_lock);
+ }
+ return status;
+}
+
+static void leave_mutex(h2_mplx *m, int acquired)
+{
+ if (acquired) {
+ apr_threadkey_private_set(NULL, thread_lock);
+ apr_thread_mutex_unlock(m->lock);
+ }
+}
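+
+/* Sketch of the intended re-entry pattern (for illustration only, these two
+ * functions are not part of the module): the outer call acquires the lock
+ * and sets the thread-local key, the inner call then enters without
+ * blocking, and only the outer call actually unlocks on leaving. */
+static void example_inner(h2_mplx *m)
+{
+    int acquired;
+    if (enter_mutex(m, &acquired) == APR_SUCCESS) {
+        /* acquired == 0 when the caller already holds m->lock */
+        leave_mutex(m, acquired);  /* no-op for the nested call */
+    }
+}
+
+static void example_outer(h2_mplx *m)
+{
+    int acquired;
+    if (enter_mutex(m, &acquired) == APR_SUCCESS) {
+        example_inner(m);          /* recursive entry is safe */
+        leave_mutex(m, acquired);  /* the lock is released here */
+    }
+}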
+
+static void beam_leave(void *ctx, apr_thread_mutex_t *lock)
+{
+ leave_mutex(ctx, 1);
+}
+
+static apr_status_t beam_enter(void *ctx, h2_beam_lock *pbl)
+{
+ h2_mplx *m = ctx;
+ int acquired;
+ apr_status_t status;
+
+ status = enter_mutex(m, &acquired);
+ if (status == APR_SUCCESS) {
+ pbl->mutex = m->lock;
+ pbl->leave = acquired? beam_leave : NULL;
+ pbl->leave_ctx = m;
+ }
+ return status;
+}
+
+static void stream_output_consumed(void *ctx,
+ h2_bucket_beam *beam, apr_off_t length)
+{
+ h2_task *task = ctx;
+ if (length > 0 && task && task->assigned) {
+ h2_req_engine_out_consumed(task->assigned, task->c, length);
+ }
+}
+
+static void stream_input_consumed(void *ctx,
+ h2_bucket_beam *beam, apr_off_t length)
+{
+ h2_mplx *m = ctx;
+ if (m->input_consumed && length) {
+ m->input_consumed(m->input_consumed_ctx, beam->id, length);
+ }
+}
+
+static int can_beam_file(void *ctx, h2_bucket_beam *beam, apr_file_t *file)
+{
+ h2_mplx *m = ctx;
+ if (m->tx_handles_reserved > 0) {
+ --m->tx_handles_reserved;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld-%d): beaming file %s, tx_avail %d",
+ m->id, beam->id, beam->tag, m->tx_handles_reserved);
+ return 1;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld-%d): can_beam_file denied on %s",
+ m->id, beam->id, beam->tag);
+ return 0;
+}
+
+static void have_out_data_for(h2_mplx *m, int stream_id);
+static void task_destroy(h2_mplx *m, h2_task *task, int called_from_master);
+
+static void check_tx_reservation(h2_mplx *m)
+{
+ if (m->tx_handles_reserved <= 0) {
+ m->tx_handles_reserved += h2_workers_tx_reserve(m->workers,
+ H2MIN(m->tx_chunk_size, h2_ihash_count(m->tasks)));
+ }
+}
+
+static void check_tx_free(h2_mplx *m)
+{
+ if (m->tx_handles_reserved > m->tx_chunk_size) {
+ apr_size_t count = m->tx_handles_reserved - m->tx_chunk_size;
+ m->tx_handles_reserved = m->tx_chunk_size;
+ h2_workers_tx_free(m->workers, count);
+ }
+ else if (m->tx_handles_reserved && h2_ihash_empty(m->tasks)) {
+ h2_workers_tx_free(m->workers, m->tx_handles_reserved);
+ m->tx_handles_reserved = 0;
+ }
+}
+
+static int purge_stream(void *ctx, void *val)
+{
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ h2_task *task = h2_ihash_get(m->tasks, stream->id);
+ h2_ihash_remove(m->spurge, stream->id);
+ h2_stream_destroy(stream);
+ if (task) {
+ task_destroy(m, task, 1);
+ }
+ return 0;
+}
+
+static void purge_streams(h2_mplx *m)
+{
+ if (!h2_ihash_empty(m->spurge)) {
+ while(!h2_ihash_iter(m->spurge, purge_stream, m)) {
+ /* repeat until empty */
+ }
+ h2_ihash_clear(m->spurge);
+ }
+}
+
+static void h2_mplx_destroy(h2_mplx *m)
+{
+ AP_DEBUG_ASSERT(m);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): destroy, tasks=%d",
+ m->id, (int)h2_ihash_count(m->tasks));
+ check_tx_free(m);
+ if (m->pool) {
+ apr_pool_destroy(m->pool);
+ }
+}
+
+/**
+ * An h2_mplx needs to be thread-safe as it will be called by
+ * the h2_session thread *and* the h2_worker threads. Therefore:
+ * - calls are protected by a mutex lock, m->lock
+ * - the pool needs its own allocator, since apr_allocator_t are
+ * not re-entrant. The separate allocator works without a
+ * separate lock since we already protect h2_mplx itself.
+ * Since HTTP/2 connections can be expected to live longer than
+ * their HTTP/1 cousins, the separate allocator seems to work better
+ *   than protecting a shared h2_session one with its own lock.
+ */
+h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
+ const h2_config *conf,
+ apr_interval_time_t stream_timeout,
+ h2_workers *workers)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_allocator_t *allocator = NULL;
+ h2_mplx *m;
+ AP_DEBUG_ASSERT(conf);
+
+ status = apr_allocator_create(&allocator);
+ if (status != APR_SUCCESS) {
+ return NULL;
+ }
+
+ m = apr_pcalloc(parent, sizeof(h2_mplx));
+ if (m) {
+ m->id = c->id;
+ APR_RING_ELEM_INIT(m, link);
+ m->c = c;
+ apr_pool_create_ex(&m->pool, parent, NULL, allocator);
+ if (!m->pool) {
+ return NULL;
+ }
+ apr_pool_tag(m->pool, "h2_mplx");
+ apr_allocator_owner_set(allocator, m->pool);
+
+ status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (status != APR_SUCCESS) {
+ h2_mplx_destroy(m);
+ return NULL;
+ }
+
+ status = apr_thread_cond_create(&m->task_thawed, m->pool);
+ if (status != APR_SUCCESS) {
+ h2_mplx_destroy(m);
+ return NULL;
+ }
+
+ m->bucket_alloc = apr_bucket_alloc_create(m->pool);
+ m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
+ m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
+
+ m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->q = h2_iq_create(m->pool, m->max_streams);
+ m->sready = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->sresume = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id));
+
+ m->stream_timeout = stream_timeout;
+ m->workers = workers;
+ m->workers_max = workers->max_workers;
+ m->workers_def_limit = 4;
+ m->workers_limit = m->workers_def_limit;
+ m->last_limit_change = m->last_idle_block = apr_time_now();
+ m->limit_change_interval = apr_time_from_msec(200);
+
+ m->tx_handles_reserved = 0;
+ m->tx_chunk_size = 4;
+
+ m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
+
+ m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams,
+ m->stream_max_mem);
+ h2_ngn_shed_set_ctx(m->ngn_shed , m);
+ }
+ return m;
+}
+
+apr_uint32_t h2_mplx_shutdown(h2_mplx *m)
+{
+ int acquired, max_stream_started = 0;
+
+ if (enter_mutex(m, &acquired) == APR_SUCCESS) {
+ max_stream_started = m->max_stream_started;
+ /* Clear schedule queue, disabling existing streams from starting */
+ h2_iq_clear(m->q);
+ leave_mutex(m, acquired);
+ }
+ return max_stream_started;
+}
+
+static void input_consumed_signal(h2_mplx *m, h2_stream *stream)
+{
+ if (stream->input && stream->started) {
+ h2_beam_send(stream->input, NULL, 0); /* trigger updates */
+ }
+}
+
+static int output_consumed_signal(h2_mplx *m, h2_task *task)
+{
+ if (task->output.beam && task->worker_started && task->assigned) {
+ /* trigger updates */
+ h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
+ }
+ return 0;
+}
+
+
+static void task_destroy(h2_mplx *m, h2_task *task, int called_from_master)
+{
+ conn_rec *slave = NULL;
+ int reuse_slave = 0;
+ apr_status_t status;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_task(%s): destroy", task->id);
+ if (called_from_master) {
+ /* Process outstanding events before destruction */
+ h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
+ if (stream) {
+ input_consumed_signal(m, stream);
+ }
+ }
+
+ /* The pool is cleared/destroyed which also closes all
+ * allocated file handles. Give this count back to our
+ * file handle pool. */
+ if (task->output.beam) {
+ m->tx_handles_reserved +=
+ h2_beam_get_files_beamed(task->output.beam);
+ h2_beam_on_produced(task->output.beam, NULL, NULL);
+ status = h2_beam_shutdown(task->output.beam, APR_NONBLOCK_READ, 1);
+ if (status != APR_SUCCESS){
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, m->c,
+ APLOGNO(03385) "h2_task(%s): output shutdown "
+ "incomplete", task->id);
+ }
+ }
+
+ slave = task->c;
+ reuse_slave = ((m->spare_slaves->nelts < m->spare_slaves->nalloc)
+ && !task->rst_error);
+
+ h2_ihash_remove(m->tasks, task->stream_id);
+ if (m->redo_tasks) {
+ h2_ihash_remove(m->redo_tasks, task->stream_id);
+ }
+ h2_task_destroy(task);
+
+ if (slave) {
+ if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
+ APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
+ }
+ else {
+ slave->sbh = NULL;
+ h2_slave_destroy(slave, NULL);
+ }
+ }
+
+ check_tx_free(m);
+}
+
+static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error)
+{
+ h2_task *task;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_stream(%ld-%d): done", m->c->id, stream->id);
+ /* Situation: we are, on the master connection, done with processing
+ * the stream. Either we have handled it successfully, or the stream
+ * was reset by the client or the connection is gone and we are
+ * shutting down the whole session.
+ *
+ * We possibly have created a task for this stream to be processed
+ * on a slave connection. The processing might actually be ongoing
+ * right now or has already finished. A finished task waits for its
+ * stream to be done. This is the common case.
+ *
+ * If the stream had input (e.g. the request had a body), a task
+ * may have read, or is still reading buckets from the input beam.
+ * This means that the task is referencing memory from the stream's
+ * pool (or the master connection bucket alloc). Before we can free
+ * the stream pool, we need to make sure that those references are
+ * gone. This is what h2_beam_shutdown() on the input waits for.
+ *
+ * With the input handled, we can tear down that beam and care
+ * about the output beam. The stream might still have buffered some
+ * buckets read from the output, so we need to get rid of those. That
+ * is done by h2_stream_cleanup().
+ *
+     * Now it is safe to destroy the task (if it exists and is finished).
+ *
+ * FIXME: we currently destroy the stream, even if the task is still
+ * ongoing. This is not ok, since task->request is coming from stream
+ * memory. We should either copy it on task creation or wait with the
+ * stream destruction until the task is done.
+ */
+ h2_iq_remove(m->q, stream->id);
+ h2_ihash_remove(m->sready, stream->id);
+ h2_ihash_remove(m->sresume, stream->id);
+ h2_ihash_remove(m->streams, stream->id);
+ if (stream->input) {
+ m->tx_handles_reserved += h2_beam_get_files_beamed(stream->input);
+ h2_beam_on_consumed(stream->input, NULL, NULL);
+ /* Let anyone blocked reading know that there is no more to come */
+ h2_beam_abort(stream->input);
+ /* Remove mutex after, so that abort still finds cond to signal */
+ h2_beam_mutex_set(stream->input, NULL, NULL, NULL);
+ }
+ h2_stream_cleanup(stream);
+
+ task = h2_ihash_get(m->tasks, stream->id);
+ if (task) {
+ if (!task->worker_done) {
+ /* task still running, cleanup once it is done */
+ if (rst_error) {
+ h2_task_rst(task, rst_error);
+ }
+ h2_ihash_add(m->shold, stream);
+ return;
+ }
+ else {
+ /* already finished */
+ task_destroy(m, task, 0);
+ }
+ }
+ h2_stream_destroy(stream);
+}
+
+static int stream_done_iter(void *ctx, void *val)
+{
+ stream_done((h2_mplx*)ctx, val, 0);
+ return 0;
+}
+
+static int task_print(void *ctx, void *val)
+{
+ h2_mplx *m = ctx;
+ h2_task *task = val;
+
+ if (task && task->request) {
+ h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
+
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
+ "->03198: h2_stream(%s): %s %s %s -> %s %d"
+ "[orph=%d/started=%d/done=%d]",
+ task->id, task->request->method,
+ task->request->authority, task->request->path,
+ task->response? "http" : (task->rst_error? "reset" : "?"),
+ task->response? task->response->http_status : task->rst_error,
+ (stream? 0 : 1), task->worker_started,
+ task->worker_done);
+ }
+ else if (task) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
+ "->03198: h2_stream(%ld-%d): NULL", m->id, task->stream_id);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
+ "->03198: h2_stream(%ld-NULL): NULL", m->id);
+ }
+ return 1;
+}
+
+static int task_abort_connection(void *ctx, void *val)
+{
+ h2_task *task = val;
+ if (task->c) {
+ task->c->aborted = 1;
+ }
+ if (task->input.beam) {
+ h2_beam_abort(task->input.beam);
+ }
+ if (task->output.beam) {
+ h2_beam_abort(task->output.beam);
+ }
+ return 1;
+}
+
+static int report_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld-%d): exists, started=%d, scheduled=%d, "
+ "submitted=%d, suspended=%d",
+ m->id, stream->id, stream->started, stream->scheduled,
+ stream->submitted, stream->suspended);
+ return 1;
+}
+
+apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
+{
+ apr_status_t status;
+ int acquired;
+
+ h2_workers_unregister(m->workers, m);
+
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ int i, wait_secs = 5;
+
+ if (!h2_ihash_empty(m->streams) && APLOGctrace1(m->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): release_join with %d streams open, "
+ "%d streams resume, %d streams ready, %d tasks",
+ m->id, (int)h2_ihash_count(m->streams),
+ (int)h2_ihash_count(m->sresume),
+ (int)h2_ihash_count(m->sready),
+ (int)h2_ihash_count(m->tasks));
+ h2_ihash_iter(m->streams, report_stream_iter, m);
+ }
+
+ /* disable WINDOW_UPDATE callbacks */
+ h2_mplx_set_consumed_cb(m, NULL, NULL);
+
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): start release_join with %d streams in hold",
+ m->id, (int)h2_ihash_count(m->shold));
+ }
+ if (!h2_ihash_empty(m->spurge)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): start release_join with %d streams to purge",
+ m->id, (int)h2_ihash_count(m->spurge));
+ }
+
+ h2_iq_clear(m->q);
+ apr_thread_cond_broadcast(m->task_thawed);
+ while (!h2_ihash_iter(m->streams, stream_done_iter, m)) {
+ /* iterate until all streams have been removed */
+ }
+ AP_DEBUG_ASSERT(h2_ihash_empty(m->streams));
+
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): 2. release_join with %d streams in hold",
+ m->id, (int)h2_ihash_count(m->shold));
+ }
+ if (!h2_ihash_empty(m->spurge)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): 2. release_join with %d streams to purge",
+ m->id, (int)h2_ihash_count(m->spurge));
+ }
+
+ /* If we still have busy workers, we cannot release our memory
+ * pool yet, as tasks have references to us.
+ * Any operation on the task slave connection will from now on
+         * fail with ECONNRESET/ABORTED, so processing them should abort
+ * and workers *should* return in a timely fashion.
+ */
+ for (i = 0; m->workers_busy > 0; ++i) {
+ h2_ihash_iter(m->tasks, task_abort_connection, m);
+
+ m->join_wait = wait;
+ status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs));
+
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ if (i > 0) {
+                    /* Uh oh. We are still waiting for assigned workers to report
+                     * that they are done. Unless we have a bug, a worker seems to be hanging.
+ * If we exit now, all will be deallocated and the worker, once
+ * it does return, will walk all over freed memory...
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03198)
+ "h2_mplx(%ld): release, waiting for %d seconds now for "
+ "%d h2_workers to return, have still %d tasks outstanding",
+ m->id, i*wait_secs, m->workers_busy,
+ (int)h2_ihash_count(m->tasks));
+ if (i == 1) {
+ h2_ihash_iter(m->tasks, task_print, m);
+ }
+ }
+ h2_mplx_abort(m);
+ apr_thread_cond_broadcast(m->task_thawed);
+ }
+ }
+
+ AP_DEBUG_ASSERT(h2_ihash_empty(m->shold));
+ if (!h2_ihash_empty(m->spurge)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): 3. release_join %d streams to purge",
+ m->id, (int)h2_ihash_count(m->spurge));
+ purge_streams(m);
+ }
+ AP_DEBUG_ASSERT(h2_ihash_empty(m->spurge));
+
+ if (!h2_ihash_empty(m->tasks)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03056)
+ "h2_mplx(%ld): release_join -> destroy, "
+ "%d tasks still present",
+ m->id, (int)h2_ihash_count(m->tasks));
+ }
+ leave_mutex(m, acquired);
+ h2_mplx_destroy(m);
+ /* all gone */
+ }
+ return status;
+}
+
+void h2_mplx_abort(h2_mplx *m)
+{
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if (!m->aborted && enter_mutex(m, &acquired) == APR_SUCCESS) {
+ m->aborted = 1;
+ h2_ngn_shed_abort(m->ngn_shed);
+ leave_mutex(m, acquired);
+ }
+}
+
+apr_status_t h2_mplx_stream_done(h2_mplx *m, h2_stream *stream)
+{
+ apr_status_t status = APR_SUCCESS;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld-%d): marking stream as done.",
+ m->id, stream->id);
+ stream_done(m, stream, stream->rst_error);
+ purge_streams(m);
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
+
+void h2_mplx_set_consumed_cb(h2_mplx *m, h2_mplx_consumed_cb *cb, void *ctx)
+{
+ m->input_consumed = cb;
+ m->input_consumed_ctx = ctx;
+}
+
+static apr_status_t out_open(h2_mplx *m, int stream_id, h2_response *response)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_task *task = h2_ihash_get(m->tasks, stream_id);
+ h2_stream *stream = h2_ihash_get(m->streams, stream_id);
+
+ if (!task || !stream) {
+ return APR_ECONNABORTED;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%s): open response: %d, rst=%d",
+ task->id, response->http_status, response->rst_error);
+
+ h2_task_set_response(task, response);
+
+ if (task->output.beam) {
+ h2_beam_buffer_size_set(task->output.beam, m->stream_max_mem);
+ h2_beam_timeout_set(task->output.beam, m->stream_timeout);
+ h2_beam_on_consumed(task->output.beam, stream_output_consumed, task);
+ m->tx_handles_reserved -= h2_beam_get_files_beamed(task->output.beam);
+ h2_beam_on_file_beam(task->output.beam, can_beam_file, m);
+ h2_beam_mutex_set(task->output.beam, beam_enter, task->cond, m);
+ }
+
+ h2_ihash_add(m->sready, stream);
+ if (response && response->http_status < 300) {
+ /* we might see some file buckets in the output, see
+ * if we have enough handles reserved. */
+ check_tx_reservation(m);
+ }
+ have_out_data_for(m, stream_id);
+ return status;
+}
+
+apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response)
+{
+ apr_status_t status;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ status = out_open(m, stream_id, response);
+ }
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
+
+static apr_status_t out_close(h2_mplx *m, h2_task *task)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_stream *stream;
+
+ if (!task) {
+ return APR_ECONNABORTED;
+ }
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (!stream) {
+ return APR_ECONNABORTED;
+ }
+
+ if (!task->response && !task->rst_error) {
+ /* In case a close comes before a response was created,
+ * insert an error one so that our streams can properly reset.
+ */
+ h2_response *r = h2_response_die(task->stream_id, 500,
+ task->request, m->pool);
+ status = out_open(m, task->stream_id, r);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, APLOGNO(03393)
+ "h2_mplx(%s): close, no response, no rst", task->id);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
+ "h2_mplx(%s): close", task->id);
+ if (task->output.beam) {
+ status = h2_beam_close(task->output.beam);
+ h2_beam_log(task->output.beam, task->stream_id, "out_close", m->c,
+ APLOG_TRACE2);
+ }
+ output_consumed_signal(m, task);
+ have_out_data_for(m, task->stream_id);
+ return status;
+}
+
+apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+ apr_thread_cond_t *iowait)
+{
+ apr_status_t status;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else if (!h2_ihash_empty(m->sready) || !h2_ihash_empty(m->sresume)) {
+ status = APR_SUCCESS;
+ }
+ else {
+ purge_streams(m);
+ m->added_output = iowait;
+ status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
+ if (APLOGctrace2(m->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): trywait on data for %f ms)",
+ m->id, timeout/1000.0);
+ }
+ m->added_output = NULL;
+ }
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
+
+static void have_out_data_for(h2_mplx *m, int stream_id)
+{
+ (void)stream_id;
+ AP_DEBUG_ASSERT(m);
+ if (m->added_output) {
+ apr_thread_cond_signal(m->added_output);
+ }
+}
+
+apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
+{
+ apr_status_t status;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_iq_sort(m->q, cmp, ctx);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): reprioritize tasks", m->id);
+ }
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
+
+apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
+ h2_stream_pri_cmp *cmp, void *ctx)
+{
+ apr_status_t status;
+ int do_registration = 0;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_ihash_add(m->streams, stream);
+ if (stream->response) {
+                /* already have a response, schedule for submit */
+ h2_ihash_add(m->sready, stream);
+ }
+ else {
+ h2_beam_create(&stream->input, stream->pool, stream->id,
+ "input", 0);
+ if (!m->need_registration) {
+ m->need_registration = h2_iq_empty(m->q);
+ }
+ if (m->workers_busy < m->workers_max) {
+ do_registration = m->need_registration;
+ }
+ h2_iq_add(m->q, stream->id, cmp, ctx);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): process, body=%d",
+ m->c->id, stream->id, stream->request->body);
+ }
+ }
+ leave_mutex(m, acquired);
+ }
+ if (do_registration) {
+ m->need_registration = 0;
+ h2_workers_register(m->workers, m);
+ }
+ return status;
+}
+
+static h2_task *pop_task(h2_mplx *m)
+{
+ h2_task *task = NULL;
+ h2_stream *stream;
+ int sid;
+ while (!m->aborted && !task && (m->workers_busy < m->workers_limit)
+ && (sid = h2_iq_shift(m->q)) > 0) {
+
+ stream = h2_ihash_get(m->streams, sid);
+ if (stream) {
+ conn_rec *slave, **pslave;
+ int new_conn = 0;
+
+ pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
+ if (pslave) {
+ slave = *pslave;
+ }
+ else {
+ slave = h2_slave_create(m->c, m->pool, NULL);
+ new_conn = 1;
+ }
+
+ slave->sbh = m->c->sbh;
+ slave->aborted = 0;
+ task = h2_task_create(slave, stream->request, stream->input, m);
+ h2_ihash_add(m->tasks, task);
+
+ m->c->keepalives++;
+ apr_table_setn(slave->notes, H2_TASK_ID_NOTE, task->id);
+ if (new_conn) {
+ h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
+ }
+ stream->started = 1;
+ task->worker_started = 1;
+ task->started_at = apr_time_now();
+ if (sid > m->max_stream_started) {
+ m->max_stream_started = sid;
+ }
+
+ if (stream->input) {
+ h2_beam_timeout_set(stream->input, m->stream_timeout);
+ h2_beam_on_consumed(stream->input, stream_input_consumed, m);
+ h2_beam_on_file_beam(stream->input, can_beam_file, m);
+ h2_beam_mutex_set(stream->input, beam_enter, task->cond, m);
+ }
+
+ ++m->workers_busy;
+ }
+ }
+ return task;
+}
+
+h2_task *h2_mplx_pop_task(h2_mplx *m, int *has_more)
+{
+ h2_task *task = NULL;
+ apr_status_t status;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ if (m->aborted) {
+ *has_more = 0;
+ }
+ else {
+ task = pop_task(m);
+ *has_more = !h2_iq_empty(m->q);
+ }
+
+ if (has_more && !task) {
+ m->need_registration = 1;
+ }
+ leave_mutex(m, acquired);
+ }
+ return task;
+}
+
+static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
+{
+ if (task->frozen) {
+ /* this task was handed over to an engine for processing
+ * and the original worker has finished. That means the
+ * engine may start processing now. */
+ h2_task_thaw(task);
+ /* we do not want the task to block on writing response
+ * bodies into the mplx. */
+ h2_task_set_io_blocking(task, 0);
+ apr_thread_cond_broadcast(m->task_thawed);
+ return;
+ }
+ else {
+ h2_stream *stream;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): task(%s) done", m->id, task->id);
+ out_close(m, task);
+ stream = h2_ihash_get(m->streams, task->stream_id);
+
+ if (ngn) {
+ apr_off_t bytes = 0;
+ if (task->output.beam) {
+ h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
+ bytes += h2_beam_get_buffered(task->output.beam);
+ }
+ if (bytes > 0) {
+ /* we need to report consumed and current buffered output
+ * to the engine. The request will be streamed out or cancelled,
+ * no more data is coming from it and the engine should update
+ * its calculations before we destroy this information. */
+ h2_req_engine_out_consumed(ngn, task->c, bytes);
+ }
+ }
+
+ if (task->engine) {
+ if (!h2_req_engine_is_shutdown(task->engine)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
+ "h2_mplx(%ld): task(%s) has not-shutdown "
+ "engine(%s)", m->id, task->id,
+ h2_req_engine_get_id(task->engine));
+ }
+ h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
+ }
+
+ if (!m->aborted && stream && m->redo_tasks
+ && h2_ihash_get(m->redo_tasks, task->stream_id)) {
+ /* reset and schedule again */
+ h2_task_redo(task);
+ h2_ihash_remove(m->redo_tasks, task->stream_id);
+ h2_iq_add(m->q, task->stream_id, NULL, NULL);
+ return;
+ }
+
+ task->worker_done = 1;
+ task->done_at = apr_time_now();
+ if (task->output.beam) {
+ h2_beam_on_consumed(task->output.beam, NULL, NULL);
+ h2_beam_mutex_set(task->output.beam, NULL, NULL, NULL);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): request done, %f ms elapsed", task->id,
+ (task->done_at - task->started_at) / 1000.0);
+ if (task->started_at > m->last_idle_block) {
+ /* this task finished without causing an 'idle block', e.g.
+ * a block by flow control.
+ */
+ if (task->done_at- m->last_limit_change >= m->limit_change_interval
+ && m->workers_limit < m->workers_max) {
+ /* Well behaving stream, allow it more workers */
+ m->workers_limit = H2MIN(m->workers_limit * 2,
+ m->workers_max);
+ m->last_limit_change = task->done_at;
+ m->need_registration = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): increase worker limit to %d",
+ m->id, m->workers_limit);
+ }
+ }
+
+ if (stream) {
+ /* hang around until the stream deregisters */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): task_done, stream still open",
+ task->id);
+ if (h2_stream_is_suspended(stream)) {
+ /* more data will not arrive, resume the stream */
+ h2_ihash_add(m->sresume, stream);
+ have_out_data_for(m, stream->id);
+ }
+ }
+ else {
+ /* stream no longer active, was it placed in hold? */
+ stream = h2_ihash_get(m->shold, task->stream_id);
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): task_done, stream in hold",
+ task->id);
+                /* We cannot destroy the stream here since this is
+                 * called from a worker thread; freeing memory pools
+                 * is only safe in the single thread that uses the pool
+                 * (and its parent pool / allocator) */
+ h2_ihash_remove(m->shold, stream->id);
+ h2_ihash_add(m->spurge, stream);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): task_done, stream not found",
+ task->id);
+ task_destroy(m, task, 0);
+ }
+
+ if (m->join_wait) {
+ apr_thread_cond_signal(m->join_wait);
+ }
+ }
+ }
+}
+
+void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
+{
+ int acquired;
+
+ if (enter_mutex(m, &acquired) == APR_SUCCESS) {
+ task_done(m, task, NULL);
+ --m->workers_busy;
+ if (ptask) {
+ /* caller wants another task */
+ *ptask = pop_task(m);
+ }
+ leave_mutex(m, acquired);
+ }
+}
+
+/*******************************************************************************
+ * h2_mplx DoS protection
+ ******************************************************************************/
+
+static int latest_repeatable_unsubmitted_iter(void *data, void *val)
+{
+ task_iter_ctx *ctx = data;
+ h2_task *task = val;
+ if (!task->worker_done && h2_task_can_redo(task)
+ && !h2_ihash_get(ctx->m->redo_tasks, task->stream_id)) {
+        /* this task occupies a worker, its response has not been submitted
+         * yet, it has not been cancelled and it is a repeatable request
+         * -> it can be re-scheduled later */
+ if (!ctx->task || ctx->task->started_at < task->started_at) {
+ /* we did not have one or this one was started later */
+ ctx->task = task;
+ }
+ }
+ return 1;
+}
+
+static h2_task *get_latest_repeatable_unsubmitted_task(h2_mplx *m)
+{
+ task_iter_ctx ctx;
+ ctx.m = m;
+ ctx.task = NULL;
+ h2_ihash_iter(m->tasks, latest_repeatable_unsubmitted_iter, &ctx);
+ return ctx.task;
+}
+
+static int timed_out_busy_iter(void *data, void *val)
+{
+ task_iter_ctx *ctx = data;
+ h2_task *task = val;
+ if (!task->worker_done
+ && (ctx->now - task->started_at) > ctx->m->stream_timeout) {
+ /* timed out stream occupying a worker, found */
+ ctx->task = task;
+ return 0;
+ }
+ return 1;
+}
+
+static h2_task *get_timed_out_busy_task(h2_mplx *m)
+{
+ task_iter_ctx ctx;
+ ctx.m = m;
+ ctx.task = NULL;
+ ctx.now = apr_time_now();
+ h2_ihash_iter(m->tasks, timed_out_busy_iter, &ctx);
+ return ctx.task;
+}
+
+static apr_status_t unschedule_slow_tasks(h2_mplx *m)
+{
+ h2_task *task;
+ int n;
+
+ if (!m->redo_tasks) {
+ m->redo_tasks = h2_ihash_create(m->pool, offsetof(h2_task, stream_id));
+ }
+ /* Try to get rid of streams that occupy workers. Look for safe requests
+ * that are repeatable. If none found, fail the connection.
+ */
+ n = (m->workers_busy - m->workers_limit - h2_ihash_count(m->redo_tasks));
+ while (n > 0 && (task = get_latest_repeatable_unsubmitted_task(m))) {
+ h2_task_rst(task, H2_ERR_CANCEL);
+ h2_ihash_add(m->redo_tasks, task);
+ --n;
+ }
+
+ if ((m->workers_busy - h2_ihash_count(m->redo_tasks)) > m->workers_limit) {
+ task = get_timed_out_busy_task(m);
+ if (task) {
+            /* Too many busy workers: we could not cancel enough streams
+             * and at least one busy stream has timed out, so we tell the
+             * client to go away... */
+ return APR_TIMEUP;
+ }
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_mplx_idle(h2_mplx *m)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_time_t now;
+ int acquired;
+
+ if (enter_mutex(m, &acquired) == APR_SUCCESS) {
+ apr_size_t scount = h2_ihash_count(m->streams);
+ if (scount > 0 && m->workers_busy) {
+            /* The connection is in state 'IDLE': all streams are ready
+             * to send data out but lack WINDOW_UPDATEs.
+             *
+             * That is fine, unless some streams still occupy h2 workers.
+             * As worker threads are a scarce resource, we need to take
+             * measures so that we do not get DoSed.
+             *
+             * This is what we call an 'idle block'. Limit the number
+             * of busy workers we allow for this connection until it
+             * behaves well.
+             */
+ now = apr_time_now();
+ m->last_idle_block = now;
+ if (m->workers_limit > 2
+ && now - m->last_limit_change >= m->limit_change_interval) {
+ if (m->workers_limit > 16) {
+ m->workers_limit = 16;
+ }
+ else if (m->workers_limit > 8) {
+ m->workers_limit = 8;
+ }
+ else if (m->workers_limit > 4) {
+ m->workers_limit = 4;
+ }
+ else if (m->workers_limit > 2) {
+ m->workers_limit = 2;
+ }
+ m->last_limit_change = now;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): decrease worker limit to %d",
+ m->id, m->workers_limit);
+ }
+
+ if (m->workers_busy > m->workers_limit) {
+ status = unschedule_slow_tasks(m);
+ }
+ }
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
+
+/*******************************************************************************
+ * HTTP/2 request engines
+ ******************************************************************************/
+
+typedef struct {
+ h2_mplx * m;
+ h2_req_engine *ngn;
+ int streams_updated;
+} ngn_update_ctx;
+
+static int ngn_update_window(void *ctx, void *val)
+{
+ ngn_update_ctx *uctx = ctx;
+ h2_task *task = val;
+ if (task && task->assigned == uctx->ngn
+ && output_consumed_signal(uctx->m, task)) {
+ ++uctx->streams_updated;
+ }
+ return 1;
+}
+
+static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn)
+{
+ ngn_update_ctx ctx;
+
+ ctx.m = m;
+ ctx.ngn = ngn;
+ ctx.streams_updated = 0;
+ h2_ihash_iter(m->tasks, ngn_update_window, &ctx);
+
+ return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN;
+}
+
+apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
+ request_rec *r,
+ http2_req_engine_init *einit)
+{
+ apr_status_t status;
+ h2_mplx *m;
+ h2_task *task;
+ int acquired;
+
+ task = h2_ctx_rget_task(r);
+ if (!task) {
+ return APR_ECONNABORTED;
+ }
+ m = task->mplx;
+ task->r = r;
+
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
+
+ if (stream) {
+ status = h2_ngn_shed_push_task(m->ngn_shed, ngn_type, task, einit);
+ }
+ else {
+ status = APR_ECONNABORTED;
+ }
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
+
+apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn,
+ apr_read_type_e block,
+ apr_uint32_t capacity,
+ request_rec **pr)
+{
+ h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn);
+ h2_mplx *m = h2_ngn_shed_get_ctx(shed);
+ apr_status_t status;
+ h2_task *task = NULL;
+ int acquired;
+
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ int want_shutdown = (block == APR_BLOCK_READ);
+
+        /* Take this opportunity to update output consumption
+         * for this engine */
+ ngn_out_update_windows(m, ngn);
+
+ if (want_shutdown && !h2_iq_empty(m->q)) {
+            /* For a blocking read, first check whether requests are
+             * available; if not, wait a short while before doing the
+             * blocking read which, if still unsuccessful, shuts the
+             * engine down.
+ */
+ status = h2_ngn_shed_pull_task(shed, ngn, capacity, 1, &task);
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): start block engine pull", m->id);
+ apr_thread_cond_timedwait(m->task_thawed, m->lock,
+ apr_time_from_msec(20));
+ status = h2_ngn_shed_pull_task(shed, ngn, capacity, 1, &task);
+ }
+ }
+ else {
+ status = h2_ngn_shed_pull_task(shed, ngn, capacity,
+ want_shutdown, &task);
+ }
+ leave_mutex(m, acquired);
+ }
+ *pr = task? task->r : NULL;
+ return status;
+}
+
+void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn)
+{
+ h2_task *task = h2_ctx_cget_task(r_conn);
+
+ if (task) {
+ h2_mplx *m = task->mplx;
+ int acquired;
+
+ if (enter_mutex(m, &acquired) == APR_SUCCESS) {
+ ngn_out_update_windows(m, ngn);
+ h2_ngn_shed_done_task(m->ngn_shed, ngn, task);
+ if (task->engine) {
+ /* cannot report that as done until engine returns */
+ }
+ else {
+ task_done(m, task, ngn);
+ }
+            /* Take this opportunity to update output consumption
+             * for this engine */
+ leave_mutex(m, acquired);
+ }
+ }
+}
+
+/*******************************************************************************
+ * mplx master events dispatching
+ ******************************************************************************/
+
+static int update_window(void *ctx, void *val)
+{
+ input_consumed_signal(ctx, val);
+ return 1;
+}
+
+apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
+ stream_ev_callback *on_resume,
+ stream_ev_callback *on_response,
+ void *on_ctx)
+{
+ apr_status_t status;
+ int acquired;
+ int streams[32];
+ h2_stream *stream;
+ h2_task *task;
+ size_t i, n;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld): dispatch events", m->id);
+
+ /* update input windows for streams */
+ h2_ihash_iter(m->streams, update_window, m);
+
+ if (on_response && !h2_ihash_empty(m->sready)) {
+ n = h2_ihash_ishift(m->sready, streams, H2_ALEN(streams));
+ for (i = 0; i < n; ++i) {
+ stream = h2_ihash_get(m->streams, streams[i]);
+ if (!stream) {
+ continue;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld-%d): on_response",
+ m->id, stream->id);
+ task = h2_ihash_get(m->tasks, stream->id);
+ if (task) {
+ task->submitted = 1;
+ if (task->rst_error) {
+ h2_stream_rst(stream, task->rst_error);
+ }
+ else {
+ AP_DEBUG_ASSERT(task->response);
+ h2_stream_set_response(stream, task->response, task->output.beam);
+ }
+ }
+ else {
+ /* We have the stream ready without a task. This happens
+ * when we fail streams early. A response should already
+ * be present. */
+ AP_DEBUG_ASSERT(stream->response || stream->rst_error);
+ }
+ status = on_response(on_ctx, stream->id);
+ }
+ }
+
+ if (on_resume && !h2_ihash_empty(m->sresume)) {
+ n = h2_ihash_ishift(m->sresume, streams, H2_ALEN(streams));
+ for (i = 0; i < n; ++i) {
+ stream = h2_ihash_get(m->streams, streams[i]);
+ if (!stream) {
+ continue;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
+ "h2_mplx(%ld-%d): on_resume",
+ m->id, stream->id);
+ h2_stream_set_suspended(stream, 0);
+ status = on_resume(on_ctx, stream->id);
+ }
+ }
+
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
+
+static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+{
+ h2_mplx *m = ctx;
+ apr_status_t status;
+ h2_stream *stream;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ stream = h2_ihash_get(m->streams, beam->id);
+ if (stream && h2_stream_is_suspended(stream)) {
+ h2_ihash_add(m->sresume, stream);
+ h2_beam_on_produced(beam, NULL, NULL);
+ have_out_data_for(m, beam->id);
+ }
+ leave_mutex(m, acquired);
+ }
+}
+
+apr_status_t h2_mplx_suspend_stream(h2_mplx *m, int stream_id)
+{
+ apr_status_t status;
+ h2_stream *stream;
+ h2_task *task;
+ int acquired;
+
+ AP_DEBUG_ASSERT(m);
+ if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
+ stream = h2_ihash_get(m->streams, stream_id);
+ if (stream) {
+ h2_stream_set_suspended(stream, 1);
+ task = h2_ihash_get(m->tasks, stream->id);
+ if (stream->started && (!task || task->worker_done)) {
+ h2_ihash_add(m->sresume, stream);
+ }
+ else {
+ /* register callback so that we can resume on new output */
+ h2_beam_on_produced(task->output.beam, output_produced, m);
+ }
+ }
+ leave_mutex(m, acquired);
+ }
+ return status;
+}
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
new file mode 100644
index 00000000..821e6d65
--- /dev/null
+++ b/modules/http2/h2_mplx.h
@@ -0,0 +1,348 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_mplx__
+#define __mod_h2__h2_mplx__
+
+/**
+ * The stream multiplexer. It pushes buckets from the connection
+ * thread to the stream threads and vice versa. It's thread-safe
+ * to use.
+ *
+ * There is one h2_mplx instance for each h2_session, which sits on top
+ * of a particular httpd conn_rec. Input goes from the connection to
+ * the stream tasks. Output goes from the stream tasks to the connection,
+ * e.g. the client.
+ *
+ * For each stream, there can be at most "H2StreamMaxMemSize" output bytes
+ * queued in the multiplexer. If a task thread tries to write more
+ * data, it is blocked until space becomes available.
+ *
+ * Writing input is never blocked. In order to use flow control on the input,
+ * the mplx can be polled for input data consumption.
+ */
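+
+/*
+ * Rough usage sketch (illustration only; the local variable names are
+ * assumptions of the caller, not part of this API): the session/master
+ * thread creates and drives the mplx roughly like this:
+ *
+ *   m = h2_mplx_create(c, pool, conf, stream_timeout, workers);
+ *   h2_mplx_process(m, stream, cmp, cmp_ctx);          schedule a stream
+ *   h2_mplx_dispatch_master_events(m, on_resume,
+ *                                  on_response, ctx);  collect results
+ *   h2_mplx_stream_done(m, stream);                    stream is finished
+ *   h2_mplx_release_and_join(m, wait);                 connection shutdown
+ */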
+
+struct apr_pool_t;
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_bucket_beam;
+struct h2_config;
+struct h2_ihash_t;
+struct h2_response;
+struct h2_task;
+struct h2_stream;
+struct h2_request;
+struct h2_workers;
+struct h2_iqueue;
+struct h2_ngn_shed;
+struct h2_req_engine;
+
+#include <apr_queue.h>
+
+typedef struct h2_mplx h2_mplx;
+
+/**
+ * Callback invoked for every stream that had input data read since
+ * the last invocation.
+ */
+typedef void h2_mplx_consumed_cb(void *ctx, int stream_id, apr_off_t consumed);
+
+struct h2_mplx {
+ long id;
+ conn_rec *c;
+ apr_pool_t *pool;
+ apr_bucket_alloc_t *bucket_alloc;
+
+ APR_RING_ENTRY(h2_mplx) link;
+
+ unsigned int aborted : 1;
+ unsigned int need_registration : 1;
+
+ struct h2_ihash_t *streams; /* all streams currently processing */
+ struct h2_ihash_t *shold; /* all streams done with task ongoing */
+ struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
+
+ struct h2_iqueue *q; /* all stream ids that need to be started */
+ struct h2_ihash_t *sready; /* all streams ready for response */
+ struct h2_ihash_t *sresume; /* all streams that can be resumed */
+
+ struct h2_ihash_t *tasks; /* all tasks started and not destroyed */
+ struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */
+
+ apr_uint32_t max_streams; /* max # of concurrent streams */
+ apr_uint32_t max_stream_started; /* highest stream id that started processing */
+ apr_uint32_t workers_busy; /* # of workers processing on this mplx */
+ apr_uint32_t workers_limit; /* current # of workers limit, dynamic */
+ apr_uint32_t workers_def_limit; /* default # of workers limit */
+ apr_uint32_t workers_max; /* max, hard limit # of workers in a process */
+    apr_time_t last_idle_block;      /* last time this mplx entered IDLE while
+                                      * streams were ready */
+    apr_time_t last_limit_change;    /* last time the worker limit changed */
+ apr_interval_time_t limit_change_interval;
+
+ apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *added_output;
+ struct apr_thread_cond_t *task_thawed;
+ struct apr_thread_cond_t *join_wait;
+
+ apr_size_t stream_max_mem;
+ apr_interval_time_t stream_timeout;
+
+ apr_pool_t *spare_io_pool;
+ apr_array_header_t *spare_slaves; /* spare slave connections */
+
+ struct h2_workers *workers;
+ int tx_handles_reserved;
+ apr_size_t tx_chunk_size;
+
+ h2_mplx_consumed_cb *input_consumed;
+ void *input_consumed_ctx;
+
+ struct h2_ngn_shed *ngn_shed;
+};
+
+
+
+/*******************************************************************************
+ * Object lifecycle and information.
+ ******************************************************************************/
+
+apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s);
+
+/**
+ * Create the multiplexer for the given HTTP2 session.
+ * Implicitly has reference count 1.
+ */
+h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master,
+ const struct h2_config *conf,
+ apr_interval_time_t stream_timeout,
+ struct h2_workers *workers);
+
+/**
+ * Decreases the reference counter of this mplx and waits for it
+ * to reach 0, then destroys the mplx.
+ * This is to be called from the thread that created the mplx in
+ * the first place.
+ * @param m the mplx to be released and destroyed
+ * @param wait condition var to wait on for ref counter == 0
+ */
+apr_status_t h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
+
+/**
+ * Aborts the multiplexer. It will answer all future invocations with
+ * APR_ECONNABORTED, leading to early termination of ongoing streams.
+ */
+void h2_mplx_abort(h2_mplx *mplx);
+
+struct h2_task *h2_mplx_pop_task(h2_mplx *mplx, int *has_more);
+
+void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
+
+/**
+ * Shut down the multiplexer gracefully. Will no longer schedule new streams
+ * but let the ongoing ones finish normally.
+ * @return the highest stream id being/been processed
+ */
+apr_uint32_t h2_mplx_shutdown(h2_mplx *m);
+
+/*******************************************************************************
+ * IO lifetime of streams.
+ ******************************************************************************/
+
+/**
+ * Notifies mplx that a stream has finished processing.
+ *
+ * @param m the mplx itself
+ * @param stream the stream being done
+ *
+ */
+apr_status_t h2_mplx_stream_done(h2_mplx *m, struct h2_stream *stream);
+
+/**
+ * Waits on output data from any stream in this session to become available.
+ * Returns APR_TIMEUP if no data arrived in the given time.
+ */
+apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+ struct apr_thread_cond_t *iowait);
+
+/*******************************************************************************
+ * Stream processing.
+ ******************************************************************************/
+
+/**
+ * Process a stream request.
+ *
+ * @param m the multiplexer
+ * @param stream the identifier of the stream
+ * @param r the request to be processed
+ * @param cmp the stream priority compare function
+ * @param ctx context data for the compare function
+ */
+apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
+ h2_stream_pri_cmp *cmp, void *ctx);
+
+/**
+ * Stream priorities have changed, reschedule pending requests.
+ *
+ * @param m the multiplexer
+ * @param cmp the stream priority compare function
+ * @param ctx context data for the compare function
+ */
+apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
+
+/**
+ * Register a callback for the amount of input data consumed per stream. The
+ * callback will only ever be invoked from the thread that created this
+ * h2_mplx, i.e. during calls from that thread into this h2_mplx.
+ *
+ * @param m the multiplexer to register the callback at
+ * @param cb the function to invoke
+ * @param ctx user-supplied argument passed to the callback on invocation.
+ */
+void h2_mplx_set_consumed_cb(h2_mplx *m, h2_mplx_consumed_cb *cb, void *ctx);
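+
+/*
+ * Sketch (illustration only, assumed names): a session might account for
+ * consumed input and schedule WINDOW_UPDATEs from such a callback:
+ *
+ *   static void on_input_consumed(void *ctx, int stream_id, apr_off_t consumed)
+ *   {
+ *       h2_session *session = ctx;
+ *       ... enlarge the stream's flow control window by `consumed` bytes ...
+ *   }
+ *
+ *   h2_mplx_set_consumed_cb(m, on_input_consumed, session);
+ */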
+
+
+typedef apr_status_t stream_ev_callback(void *ctx, int stream_id);
+
+/**
+ * Dispatch events for the master connection, such as
+ * - resume: new output data has arrived for a suspended stream
+ * - response: the response for a stream is ready
+ */
+apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
+ stream_ev_callback *on_resume,
+ stream_ev_callback *on_response,
+ void *ctx);
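+
+/*
+ * Sketch (illustration only, assumed callback names): the master connection
+ * typically supplies two small callbacks that turn mplx events into HTTP/2
+ * frames:
+ *
+ *   static apr_status_t on_stream_resume(void *ctx, int stream_id)
+ *   {
+ *       ... submit DATA for the stream that has new output ...
+ *       return APR_SUCCESS;
+ *   }
+ *
+ *   static apr_status_t on_stream_response(void *ctx, int stream_id)
+ *   {
+ *       ... submit HEADERS for the stream's now available response ...
+ *       return APR_SUCCESS;
+ *   }
+ *
+ *   h2_mplx_dispatch_master_events(m, on_stream_resume,
+ *                                  on_stream_response, session);
+ */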
+
+apr_status_t h2_mplx_suspend_stream(h2_mplx *m, int stream_id);
+
+/*******************************************************************************
+ * Output handling of streams.
+ ******************************************************************************/
+
+/**
+ * Opens the output for the given stream with the specified response.
+ */
+apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id,
+ struct h2_response *response);
+
+/*******************************************************************************
+ * h2_mplx list Manipulation.
+ ******************************************************************************/
+
+/**
+ * The magic pointer value that indicates the head of a h2_mplx list
+ * @param b The mplx list
+ * @return The magic pointer value
+ */
+#define H2_MPLX_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_mplx, link)
+
+/**
+ * Determine if the mplx list is empty
+ * @param b The list to check
+ * @return true or false
+ */
+#define H2_MPLX_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_mplx, link)
+
+/**
+ * Return the first mplx in a list
+ * @param b The list to query
+ * @return The first mplx in the list
+ */
+#define H2_MPLX_LIST_FIRST(b) APR_RING_FIRST(b)
+
+/**
+ * Return the last mplx in a list
+ * @param b The list to query
+ * @return The last mplx in the list
+ */
+#define H2_MPLX_LIST_LAST(b) APR_RING_LAST(b)
+
+/**
+ * Insert a single mplx at the front of a list
+ * @param b The list to add to
+ * @param e The mplx to insert
+ */
+#define H2_MPLX_LIST_INSERT_HEAD(b, e) do { \
+h2_mplx *ap__b = (e); \
+APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link); \
+} while (0)
+
+/**
+ * Insert a single mplx at the end of a list
+ * @param b The list to add to
+ * @param e The mplx to insert
+ */
+#define H2_MPLX_LIST_INSERT_TAIL(b, e) do { \
+h2_mplx *ap__b = (e); \
+APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \
+} while (0)
+
+/**
+ * Get the next mplx in the list
+ * @param e The current mplx
+ * @return The next mplx
+ */
+#define H2_MPLX_NEXT(e) APR_RING_NEXT((e), link)
+/**
+ * Get the previous mplx in the list
+ * @param e The current mplx
+ * @return The previous mplx
+ */
+#define H2_MPLX_PREV(e) APR_RING_PREV((e), link)
+
+/**
+ * Remove a mplx from its list
+ * @param e The mplx to remove
+ */
+#define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link)
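+
+/*
+ * Usage sketch for the list macros (illustration only), assuming a ring
+ * head declared as APR_RING_HEAD(h2_mplx_list, h2_mplx) mplxs:
+ *
+ *   H2_MPLX_LIST_INSERT_TAIL(&mplxs, m);
+ *   while (!H2_MPLX_LIST_EMPTY(&mplxs)) {
+ *       h2_mplx *first = H2_MPLX_LIST_FIRST(&mplxs);
+ *       H2_MPLX_REMOVE(first);
+ *       ...
+ *   }
+ */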
+
+/*******************************************************************************
+ * h2_mplx DoS protection
+ ******************************************************************************/
+
+/**
+ * Master connection has entered idle mode.
+ * @param m the mplx instance of the master connection
+ * @return != SUCCESS iff connection should be terminated
+ */
+apr_status_t h2_mplx_idle(h2_mplx *m);
+
+/*******************************************************************************
+ * h2_req_engine handling
+ ******************************************************************************/
+
+typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
+typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_uint32_t req_buffer_size,
+ request_rec *r,
+ h2_output_consumed **pconsumed,
+ void **pbaton);
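+
+/*
+ * Minimal init sketch (illustration only, assumed names): an engine
+ * implementation hands back its output-consumed callback and baton:
+ *
+ *   static apr_status_t my_engine_init(struct h2_req_engine *engine,
+ *                                      const char *id, const char *type,
+ *                                      apr_pool_t *pool,
+ *                                      apr_uint32_t req_buffer_size,
+ *                                      request_rec *r,
+ *                                      h2_output_consumed **pconsumed,
+ *                                      void **pbaton)
+ *   {
+ *       *pconsumed = my_out_consumed;
+ *       *pbaton = my_engine_ctx;
+ *       return APR_SUCCESS;
+ *   }
+ */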
+
+apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
+ request_rec *r,
+ h2_mplx_req_engine_init *einit);
+apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn,
+ apr_read_type_e block,
+ apr_uint32_t capacity,
+ request_rec **pr);
+void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn);
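+
+/*
+ * Flow sketch (illustration only, assumed names): a module such as
+ * mod_proxy_http2 pushes its request to an engine of its type and then
+ * pulls further requests until none are left:
+ *
+ *   h2_mplx_req_engine_push(my_engine_type, r, my_engine_init);
+ *   ...
+ *   while (h2_mplx_req_engine_pull(ngn, APR_NONBLOCK_READ,
+ *                                  capacity, &r) == APR_SUCCESS) {
+ *       ... process r, then ...
+ *       h2_mplx_req_engine_done(ngn, r->connection);
+ *   }
+ */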
+
+#endif /* defined(__mod_h2__h2_mplx__) */
diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c
new file mode 100644
index 00000000..f0676421
--- /dev/null
+++ b/modules/http2/h2_ngn_shed.c
@@ -0,0 +1,366 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include "mod_http2.h"
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_config.h"
+#include "h2_conn.h"
+#include "h2_ctx.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_response.h"
+#include "h2_request.h"
+#include "h2_task.h"
+#include "h2_util.h"
+#include "h2_ngn_shed.h"
+
+
+typedef struct h2_ngn_entry h2_ngn_entry;
+struct h2_ngn_entry {
+ APR_RING_ENTRY(h2_ngn_entry) link;
+ h2_task *task;
+};
+
+#define H2_NGN_ENTRY_NEXT(e) APR_RING_NEXT((e), link)
+#define H2_NGN_ENTRY_PREV(e) APR_RING_PREV((e), link)
+#define H2_NGN_ENTRY_REMOVE(e) APR_RING_REMOVE((e), link)
+
+#define H2_REQ_ENTRIES_SENTINEL(b) APR_RING_SENTINEL((b), h2_ngn_entry, link)
+#define H2_REQ_ENTRIES_EMPTY(b) APR_RING_EMPTY((b), h2_ngn_entry, link)
+#define H2_REQ_ENTRIES_FIRST(b) APR_RING_FIRST(b)
+#define H2_REQ_ENTRIES_LAST(b) APR_RING_LAST(b)
+
+#define H2_REQ_ENTRIES_INSERT_HEAD(b, e) do { \
+h2_ngn_entry *ap__b = (e); \
+APR_RING_INSERT_HEAD((b), ap__b, h2_ngn_entry, link); \
+} while (0)
+
+#define H2_REQ_ENTRIES_INSERT_TAIL(b, e) do { \
+h2_ngn_entry *ap__b = (e); \
+APR_RING_INSERT_TAIL((b), ap__b, h2_ngn_entry, link); \
+} while (0)
+
+struct h2_req_engine {
+ const char *id; /* identifier */
+ const char *type; /* name of the engine type */
+ apr_pool_t *pool; /* pool for engine specific allocations */
+ conn_rec *c; /* connection this engine is assigned to */
+    h2_task *task;        /* the task this engine is based on and runs in */
+ h2_ngn_shed *shed;
+
+ unsigned int shutdown : 1; /* engine is being shut down */
+ unsigned int done : 1; /* engine has finished */
+
+ APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries;
+ apr_uint32_t capacity; /* maximum concurrent requests */
+ apr_uint32_t no_assigned; /* # of assigned requests */
+    apr_uint32_t no_live;      /* # of live requests */
+    apr_uint32_t no_finished;  /* # of finished requests */
+
+ h2_output_consumed *out_consumed;
+ void *out_consumed_ctx;
+};
+
+const char *h2_req_engine_get_id(h2_req_engine *engine)
+{
+ return engine->id;
+}
+
+int h2_req_engine_is_shutdown(h2_req_engine *engine)
+{
+ return engine->shutdown;
+}
+
+void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c,
+ apr_off_t bytes)
+{
+ if (engine->out_consumed) {
+ engine->out_consumed(engine->out_consumed_ctx, c, bytes);
+ }
+}
+
+h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
+ apr_uint32_t default_capacity,
+ apr_uint32_t req_buffer_size)
+{
+ h2_ngn_shed *shed;
+
+ shed = apr_pcalloc(pool, sizeof(*shed));
+ shed->c = c;
+ shed->pool = pool;
+ shed->default_capacity = default_capacity;
+ shed->req_buffer_size = req_buffer_size;
+ shed->ngns = apr_hash_make(pool);
+
+ return shed;
+}
+
+void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx)
+{
+ shed->user_ctx = user_ctx;
+}
+
+void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed)
+{
+ return shed->user_ctx;
+}
+
+h2_ngn_shed *h2_ngn_shed_get_shed(h2_req_engine *ngn)
+{
+ return ngn->shed;
+}
+
+void h2_ngn_shed_abort(h2_ngn_shed *shed)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03394)
+ "h2_ngn_shed(%ld): abort", shed->c->id);
+ shed->aborted = 1;
+}
+
+static void ngn_add_task(h2_req_engine *ngn, h2_task *task)
+{
+ h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry));
+ APR_RING_ELEM_INIT(entry, link);
+ entry->task = task;
+ H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry);
+}
+
+
+apr_status_t h2_ngn_shed_push_task(h2_ngn_shed *shed, const char *ngn_type,
+ h2_task *task, http2_req_engine_init *einit)
+{
+ h2_req_engine *ngn;
+
+ AP_DEBUG_ASSERT(shed);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id,
+ task->id);
+ if (task->ser_headers) {
+        /* Request headers are serialized for maximum compatibility,
+         * deny engine processing of this task */
+ return APR_EOF;
+ }
+
+ ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING);
+ if (ngn && !ngn->shutdown) {
+ /* this task will be processed in another thread,
+ * freeze any I/O for the time being. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_ngn_shed(%ld): pushing request %s to %s",
+ shed->c->id, task->id, ngn->id);
+ if (!h2_task_is_detached(task)) {
+ h2_task_freeze(task);
+ }
+        /* FIXME: sometimes ngn is garbage, probably already freed */
+ ngn_add_task(ngn, task);
+ ngn->no_assigned++;
+ return APR_SUCCESS;
+ }
+
+ /* no existing engine or being shut down, start a new one */
+ if (einit) {
+ apr_status_t status;
+ apr_pool_t *pool = task->pool;
+ h2_req_engine *newngn;
+
+ newngn = apr_pcalloc(pool, sizeof(*ngn));
+ newngn->pool = pool;
+ newngn->id = apr_psprintf(pool, "ngn-%s", task->id);
+ newngn->type = apr_pstrdup(pool, ngn_type);
+ newngn->c = task->c;
+ newngn->shed = shed;
+ newngn->capacity = shed->default_capacity;
+ newngn->no_assigned = 1;
+ newngn->no_live = 1;
+ APR_RING_INIT(&newngn->entries, h2_ngn_entry, link);
+
+ status = einit(newngn, newngn->id, newngn->type, newngn->pool,
+ shed->req_buffer_size, task->r,
+ &newngn->out_consumed, &newngn->out_consumed_ctx);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03395)
+ "h2_ngn_shed(%ld): create engine %s (%s)",
+ shed->c->id, newngn->id, newngn->type);
+ if (status == APR_SUCCESS) {
+ AP_DEBUG_ASSERT(task->engine == NULL);
+ newngn->task = task;
+ task->engine = newngn;
+ task->assigned = newngn;
+ apr_hash_set(shed->ngns, newngn->type, APR_HASH_KEY_STRING, newngn);
+ }
+ return status;
+ }
+ return APR_EOF;
+}
+
+static h2_ngn_entry *pop_detached(h2_req_engine *ngn)
+{
+ h2_ngn_entry *entry;
+ for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
+ entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
+ entry = H2_NGN_ENTRY_NEXT(entry)) {
+ if (h2_task_is_detached(entry->task)
+ || (entry->task->engine == ngn)) {
+ /* The task hosting this engine can always be pulled by it.
+             * Other tasks need to become detached first, i.e. no longer
+             * be assigned to another worker. */
+ H2_NGN_ENTRY_REMOVE(entry);
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed,
+ h2_req_engine *ngn,
+ apr_uint32_t capacity,
+ int want_shutdown,
+ h2_task **ptask)
+{
+ h2_ngn_entry *entry;
+
+ AP_DEBUG_ASSERT(ngn);
+ *ptask = NULL;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03396)
+ "h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d",
+ shed->c->id, ngn->id, want_shutdown);
+ if (shed->aborted) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03397)
+ "h2_ngn_shed(%ld): abort while pulling requests %s",
+ shed->c->id, ngn->id);
+ ngn->shutdown = 1;
+ return APR_ECONNABORTED;
+ }
+
+ ngn->capacity = capacity;
+ if (H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
+ if (want_shutdown) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): emtpy queue, shutdown engine %s",
+ shed->c->id, ngn->id);
+ ngn->shutdown = 1;
+ }
+ return ngn->shutdown? APR_EOF : APR_EAGAIN;
+ }
+
+ if ((entry = pop_detached(ngn))) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c, APLOGNO(03398)
+ "h2_ngn_shed(%ld): pulled request %s for engine %s",
+ shed->c->id, entry->task->id, ngn->id);
+ ngn->no_live++;
+ *ptask = entry->task;
+ entry->task->assigned = ngn;
+ /* task will now run in ngn's own thread. Modules like lua
+ * seem to require the correct thread set in the conn_rec.
+ * See PR 59542. */
+ if (entry->task->c && ngn->c) {
+ entry->task->c->current_thread = ngn->c->current_thread;
+ }
+ if (entry->task->engine == ngn) {
+ /* If an engine pushes its own base task, and then pulls
+ * it back to itself again, it needs to be thawed.
+ */
+ h2_task_thaw(entry->task);
+ }
+ return APR_SUCCESS;
+ }
+
+ if (1) {
+ h2_ngn_entry *entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03399)
+ "h2_ngn_shed(%ld): pull task, nothing, first task %s",
+ shed->c->id, entry->task->id);
+ }
+ return APR_EAGAIN;
+}
+
+static apr_status_t ngn_done_task(h2_ngn_shed *shed, h2_req_engine *ngn,
+ h2_task *task, int waslive, int aborted)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03400)
+ "h2_ngn_shed(%ld): task %s %s by %s",
+ shed->c->id, task->id, aborted? "aborted":"done", ngn->id);
+ ngn->no_finished++;
+ if (waslive) ngn->no_live--;
+ ngn->no_assigned--;
+ task->assigned = NULL;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed,
+ struct h2_req_engine *ngn, h2_task *task)
+{
+ return ngn_done_task(shed, ngn, task, 1, 0);
+}
+
+void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn)
+{
+ if (ngn->done) {
+ return;
+ }
+
+ if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
+ h2_ngn_entry *entry;
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+ "h2_ngn_shed(%ld): exit engine %s (%s), "
+ "has still requests queued, shutdown=%d,"
+ "assigned=%ld, live=%ld, finished=%ld",
+ shed->c->id, ngn->id, ngn->type,
+ ngn->shutdown,
+ (long)ngn->no_assigned, (long)ngn->no_live,
+ (long)ngn->no_finished);
+ for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
+ entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
+ entry = H2_NGN_ENTRY_NEXT(entry)) {
+ h2_task *task = entry->task;
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+ "h2_ngn_shed(%ld): engine %s has queued task %s, "
+ "frozen=%d, aborting",
+ shed->c->id, ngn->id, task->id, task->frozen);
+ ngn_done_task(shed, ngn, task, 0, 1);
+ }
+ }
+ if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+ "h2_ngn_shed(%ld): exit engine %s (%s), "
+ "assigned=%ld, live=%ld, finished=%ld",
+ shed->c->id, ngn->id, ngn->type,
+ (long)ngn->no_assigned, (long)ngn->no_live,
+ (long)ngn->no_finished);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+ "h2_ngn_shed(%ld): exit engine %s",
+ shed->c->id, ngn->id);
+ }
+
+ apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL);
+ ngn->done = 1;
+}
diff --git a/modules/http2/h2_ngn_shed.h b/modules/http2/h2_ngn_shed.h
new file mode 100644
index 00000000..832dbd3a
--- /dev/null
+++ b/modules/http2/h2_ngn_shed.h
@@ -0,0 +1,76 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_req_shed_h
+#define h2_req_shed_h
+
+struct h2_req_engine;
+struct h2_task;
+
+typedef struct h2_ngn_shed h2_ngn_shed;
+struct h2_ngn_shed {
+ conn_rec *c;
+ apr_pool_t *pool;
+ apr_hash_t *ngns;
+ void *user_ctx;
+
+ unsigned int aborted : 1;
+
+ apr_uint32_t default_capacity;
+ apr_uint32_t req_buffer_size; /* preferred buffer size for responses */
+};
+
+const char *h2_req_engine_get_id(h2_req_engine *engine);
+int h2_req_engine_is_shutdown(h2_req_engine *engine);
+
+void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c,
+ apr_off_t bytes);
+
+typedef apr_status_t h2_shed_ngn_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_uint32_t req_buffer_size,
+ request_rec *r,
+ h2_output_consumed **pconsumed,
+ void **pbaton);
+
+h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
+                                apr_uint32_t default_capacity,
+ apr_uint32_t req_buffer_size);
+
+void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx);
+void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed);
+
+h2_ngn_shed *h2_ngn_shed_get_shed(struct h2_req_engine *ngn);
+
+void h2_ngn_shed_abort(h2_ngn_shed *shed);
+
+apr_status_t h2_ngn_shed_push_task(h2_ngn_shed *shed, const char *ngn_type,
+ struct h2_task *task,
+ h2_shed_ngn_init *init_cb);
+
+apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed, h2_req_engine *pub_ngn,
+ apr_uint32_t capacity,
+ int want_shutdown, struct h2_task **ptask);
+
+apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed,
+ struct h2_req_engine *ngn,
+ struct h2_task *task);
+
+void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn);
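+
+/*
+ * Pull-loop sketch (illustration only, assumed names): an engine drains
+ * its shed and finally reports itself done:
+ *
+ *   struct h2_task *task;
+ *   while (h2_ngn_shed_pull_task(shed, ngn, capacity, 1, &task)
+ *          == APR_SUCCESS) {
+ *       ... run the task ...
+ *       h2_ngn_shed_done_task(shed, ngn, task);
+ *   }
+ *   h2_ngn_shed_done_ngn(shed, ngn);
+ */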
+
+
+#endif /* h2_req_shed_h */
diff --git a/modules/http2/h2_private.h b/modules/http2/h2_private.h
new file mode 100644
index 00000000..b6861369
--- /dev/null
+++ b/modules/http2/h2_private.h
@@ -0,0 +1,27 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef mod_h2_h2_private_h
+#define mod_h2_h2_private_h
+
+#include <apr_time.h>
+
+#include <nghttp2/nghttp2.h>
+
+extern module AP_MODULE_DECLARE_DATA http2_module;
+
+APLOG_USE_MODULE(http2);
+
+#endif
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
new file mode 100644
index 00000000..79a2e82e
--- /dev/null
+++ b/modules/http2/h2_proxy_session.c
@@ -0,0 +1,1368 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+#include <apr_strings.h>
+#include <nghttp2/nghttp2.h>
+
+#include <mpm_common.h>
+#include <httpd.h>
+#include <mod_proxy.h>
+
+#include "mod_http2.h"
+#include "h2.h"
+#include "h2_proxy_util.h"
+#include "h2_proxy_session.h"
+
+APLOG_USE_MODULE(proxy_http2);
+
+typedef struct h2_proxy_stream {
+ int id;
+ apr_pool_t *pool;
+ h2_proxy_session *session;
+
+ const char *url;
+ request_rec *r;
+ h2_request *req;
+ int standalone;
+
+ h2_stream_state_t state;
+ unsigned int suspended : 1;
+ unsigned int data_sent : 1;
+ unsigned int data_received : 1;
+ uint32_t error_code;
+
+ apr_bucket_brigade *input;
+ apr_bucket_brigade *output;
+
+ apr_table_t *saves;
+} h2_proxy_stream;
+
+
+static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
+ int arg, const char *msg);
+
+
+static apr_status_t proxy_session_pre_close(void *theconn)
+{
+ proxy_conn_rec *p_conn = (proxy_conn_rec *)theconn;
+ h2_proxy_session *session = p_conn->data;
+
+ if (session && session->ngh2) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "proxy_session(%s): pool cleanup, state=%d, streams=%d",
+ session->id, session->state,
+ (int)h2_ihash_count(session->streams));
+ session->aborted = 1;
+ dispatch_event(session, H2_PROXYS_EV_PRE_CLOSE, 0, NULL);
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ p_conn->data = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+static int proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+ proxy_conn_rec *p_conn,
+ conn_rec *origin, apr_bucket_brigade *bb,
+ int flush)
+{
+ apr_status_t status;
+ apr_off_t transferred;
+
+ if (flush) {
+ apr_bucket *e = apr_bucket_flush_create(bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ }
+ apr_brigade_length(bb, 0, &transferred);
+ if (transferred != -1)
+ p_conn->worker->s->transferred += transferred;
+ status = ap_pass_brigade(origin->output_filters, bb);
+ /* Cleanup the brigade now to avoid buckets lifetime
+ * issues in case of error returned below. */
+ apr_brigade_cleanup(bb);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, origin, APLOGNO(03357)
+ "pass output failed to %pI (%s)",
+ p_conn->addr, p_conn->hostname);
+ }
+ return status;
+}
+
+static ssize_t raw_send(nghttp2_session *ngh2, const uint8_t *data,
+ size_t length, int flags, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ apr_bucket *b;
+ apr_status_t status;
+ int flush = 1;
+
+ if (data) {
+ b = apr_bucket_transient_create((const char*)data, length,
+ session->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(session->output, b);
+ }
+
+ status = proxy_pass_brigade(session->c->bucket_alloc,
+ session->p_conn, session->c,
+ session->output, flush);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_proxy_sesssion(%s): raw_send %d bytes, flush=%d",
+ session->id, (int)length, flush);
+ if (status != APR_SUCCESS) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ return length;
+}
+
+static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ int n;
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03341)
+ "h2_proxy_session(%s): recv FRAME[%s]",
+ session->id, buffer);
+ }
+
+ switch (frame->hd.type) {
+ case NGHTTP2_HEADERS:
+ break;
+ case NGHTTP2_PUSH_PROMISE:
+ break;
+ case NGHTTP2_SETTINGS:
+ if (frame->settings.niv > 0) {
+ n = nghttp2_session_get_remote_settings(ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
+ if (n > 0) {
+ session->remote_max_concurrent = n;
+ }
+ }
+ break;
+ case NGHTTP2_GOAWAY:
+ /* we expect the remote server to tell us the highest stream id
+ * that it has started processing. */
+ session->last_stream_id = frame->goaway.last_stream_id;
+ dispatch_event(session, H2_PROXYS_EV_REMOTE_GOAWAY, 0, NULL);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int before_frame_send(nghttp2_session *ngh2,
+ const nghttp2_frame *frame, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03343)
+ "h2_proxy_session(%s): sent FRAME[%s]",
+ session->id, buffer);
+ }
+ return 0;
+}
+
+static int add_header(void *table, const char *n, const char *v)
+{
+ apr_table_addn(table, n, v);
+ return 1;
+}
+
+static void process_proxy_header(request_rec *r, const char *n, const char *v)
+{
+ static const struct {
+ const char *name;
+ ap_proxy_header_reverse_map_fn func;
+ } transform_hdrs[] = {
+ { "Location", ap_proxy_location_reverse_map },
+ { "Content-Location", ap_proxy_location_reverse_map },
+ { "URI", ap_proxy_location_reverse_map },
+ { "Destination", ap_proxy_location_reverse_map },
+ { "Set-Cookie", ap_proxy_cookie_reverse_map },
+ { NULL, NULL }
+ };
+ proxy_dir_conf *dconf;
+ int i;
+
+ for (i = 0; transform_hdrs[i].name; ++i) {
+ if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) {
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ apr_table_add(r->headers_out, n,
+ (*transform_hdrs[i].func)(r, dconf, v));
+ return;
+ }
+ }
+ apr_table_add(r->headers_out, n, v);
+}
+
+static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
+ const char *n, apr_size_t nlen,
+ const char *v, apr_size_t vlen)
+{
+ if (n[0] == ':') {
+ if (!stream->data_received && !strncmp(":status", n, nlen)) {
+ char *s = apr_pstrndup(stream->r->pool, v, vlen);
+
+ apr_table_setn(stream->r->notes, "proxy-status", s);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got status %s",
+ stream->session->id, stream->id, s);
+ stream->r->status = (int)apr_atoi64(s);
+ if (stream->r->status <= 0) {
+ stream->r->status = 500;
+ return APR_EGENERAL;
+ }
+ }
+ return APR_SUCCESS;
+ }
+
+ if (!h2_proxy_res_ignore_header(n, nlen)) {
+ char *hname, *hvalue;
+
+ hname = apr_pstrndup(stream->pool, n, nlen);
+ h2_util_camel_case_header(hname, nlen);
+ hvalue = apr_pstrndup(stream->pool, v, vlen);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_proxy_stream(%s-%d): got header %s: %s",
+ stream->session->id, stream->id, hname, hvalue);
+ process_proxy_header(stream->r, hname, hvalue);
+ }
+ return APR_SUCCESS;
+}
+
+static int log_header(void *ctx, const char *key, const char *value)
+{
+ h2_proxy_stream *stream = ctx;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_stream(%s-%d), header_out %s: %s",
+ stream->session->id, stream->id, key, value);
+ return 1;
+}
+
+static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream)
+{
+ h2_proxy_session *session = stream->session;
+ request_rec *r = stream->r;
+ apr_pool_t *p = r->pool;
+
+ /* Now, add in the cookies from the response to the ones already saved */
+ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
+
+ /* and now load 'em all in */
+ if (!apr_is_empty_table(stream->saves)) {
+ apr_table_unset(r->headers_out, "Set-Cookie");
+ r->headers_out = apr_table_overlay(p, r->headers_out, stream->saves);
+ }
+
+ /* handle Via header in response */
+ if (session->conf->viaopt != via_off
+ && session->conf->viaopt != via_block) {
+ const char *server_name = ap_get_server_name(stream->r);
+ apr_port_t port = ap_get_server_port(stream->r);
+ char portstr[32];
+
+ /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
+ * then the server name returned by ap_get_server_name() is the
+         * origin server name (which does not make much sense in Via: headers),
+ * so we use the proxy vhost's name instead.
+ */
+ if (server_name == stream->r->hostname) {
+ server_name = stream->r->server->server_hostname;
+ }
+ if (ap_is_default_port(port, stream->r)) {
+ portstr[0] = '\0';
+ }
+ else {
+ apr_snprintf(portstr, sizeof(portstr), ":%d", port);
+ }
+
+ /* create a "Via:" response header entry and merge it */
+ apr_table_addn(r->headers_out, "Via",
+ (session->conf->viaopt == via_full)
+ ? apr_psprintf(p, "%d.%d %s%s (%s)",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, portstr,
+ AP_SERVER_BASEVERSION)
+ : apr_psprintf(p, "%d.%d %s%s",
+ HTTP_VERSION_MAJOR(r->proto_num),
+ HTTP_VERSION_MINOR(r->proto_num),
+ server_name, portstr)
+ );
+ }
+
+ if (APLOGrtrace2(stream->r)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_stream(%s-%d), header_out after merging",
+ stream->session->id, stream->id);
+ apr_table_do(log_header, stream, stream->r->headers_out, NULL);
+ }
+}
+
+static int on_data_chunk_recv(nghttp2_session *ngh2, uint8_t flags,
+ int32_t stream_id, const uint8_t *data,
+ size_t len, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ apr_bucket *b;
+ apr_status_t status;
+
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03358)
+ "h2_proxy_session(%s): recv data chunk for "
+ "unknown stream %d, ignored",
+ session->id, stream_id);
+ return 0;
+ }
+
+ if (!stream->data_received) {
+ /* last chance to manipulate response headers.
+ * after this, only trailers */
+ h2_proxy_stream_end_headers_out(stream);
+ stream->data_received = 1;
+ }
+
+ b = apr_bucket_transient_create((const char*)data, len,
+ stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ /* always flush after a DATA frame, as we have no other indication
+ * of buffer use */
+ b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, stream->r, APLOGNO(03359)
+ "h2_proxy_session(%s): pass response data for "
+ "stream %d, %d bytes", session->id, stream_id, (int)len);
+ status = ap_pass_brigade(stream->r->output_filters, stream->output);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344)
+ "h2_proxy_session(%s): passing output on stream %d",
+ session->id, stream->id);
+ nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+ if (stream->standalone) {
+ nghttp2_session_consume(ngh2, stream_id, len);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
+ "h2_proxy_session(%s): stream %d, win_update %d bytes",
+ session->id, stream_id, (int)len);
+ }
+ return 0;
+}
+
+static int on_stream_close(nghttp2_session *ngh2, int32_t stream_id,
+ uint32_t error_code, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ if (!session->aborted) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03360)
+ "h2_proxy_session(%s): stream=%d, closed, err=%d",
+ session->id, stream_id, error_code);
+ stream = h2_ihash_get(session->streams, stream_id);
+ if (stream) {
+ stream->error_code = error_code;
+ }
+ dispatch_event(session, H2_PROXYS_EV_STREAM_DONE, stream_id, NULL);
+ }
+ return 0;
+}
+
+static int on_header(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ const uint8_t *namearg, size_t nlen,
+ const uint8_t *valuearg, size_t vlen, uint8_t flags,
+ void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ const char *n = (const char*)namearg;
+ const char *v = (const char*)valuearg;
+
+ (void)session;
+ if (frame->hd.type == NGHTTP2_HEADERS && nlen) {
+ stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
+ if (stream) {
+ if (h2_proxy_stream_add_header_out(stream, n, nlen, v, vlen)) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+ }
+ else if (frame->hd.type == NGHTTP2_PUSH_PROMISE) {
+ }
+
+ return 0;
+}
+
+static ssize_t stream_data_read(nghttp2_session *ngh2, int32_t stream_id,
+ uint8_t *buf, size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source, void *user_data)
+{
+ h2_proxy_stream *stream;
+ apr_status_t status = APR_SUCCESS;
+
+ *data_flags = 0;
+ stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
+ if (!stream) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03361)
+                     "h2_proxy_stream: data_read, stream %d not found",
+                     stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (APR_BRIGADE_EMPTY(stream->input)) {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ H2MAX(APR_BUCKET_BUFF_SIZE, length));
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%s-%d): request body read",
+ stream->session->id, stream->id);
+ }
+
+ if (status == APR_SUCCESS) {
+ ssize_t readlen = 0;
+ while (status == APR_SUCCESS
+ && (readlen < length)
+ && !APR_BRIGADE_EMPTY(stream->input)) {
+ apr_bucket* b = APR_BRIGADE_FIRST(stream->input);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ *data_flags |= NGHTTP2_DATA_FLAG_EOF;
+ }
+ else {
+ /* we do nothing more regarding any meta here */
+ }
+ }
+ else {
+ const char *bdata = NULL;
+ apr_size_t blen = 0;
+ status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
+
+ if (status == APR_SUCCESS && blen > 0) {
+ ssize_t copylen = H2MIN(length - readlen, blen);
+ memcpy(buf, bdata, copylen);
+ buf += copylen;
+ readlen += copylen;
+ if (copylen < blen) {
+ /* We have data left in the bucket. Split it. */
+ status = apr_bucket_split(b, copylen);
+ }
+ }
+ }
+ apr_bucket_delete(b);
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%d): request body read %ld bytes, flags=%d",
+ stream->id, (long)readlen, (int)*data_flags);
+ stream->data_sent = 1;
+ return readlen;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status)) {
+ /* suspended stream, needs to be re-awakened */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
+ "h2_proxy_stream(%s-%d): suspending",
+ stream->session->id, stream_id);
+ stream->suspended = 1;
+ h2_iq_add(stream->session->suspended, stream->id, NULL, NULL);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ else {
+ nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE,
+ stream_id, NGHTTP2_STREAM_CLOSED);
+ return NGHTTP2_ERR_STREAM_CLOSING;
+ }
+}
+
+h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+ proxy_server_conf *conf,
+ unsigned char window_bits_connection,
+ unsigned char window_bits_stream,
+ h2_proxy_request_done *done)
+{
+ if (!p_conn->data) {
+ apr_pool_t *pool = p_conn->scpool;
+ h2_proxy_session *session;
+ nghttp2_session_callbacks *cbs;
+ nghttp2_option *option;
+
+ session = apr_pcalloc(pool, sizeof(*session));
+ apr_pool_pre_cleanup_register(pool, p_conn, proxy_session_pre_close);
+ p_conn->data = session;
+
+ session->id = apr_pstrdup(p_conn->scpool, id);
+ session->c = p_conn->connection;
+ session->p_conn = p_conn;
+ session->conf = conf;
+ session->pool = p_conn->scpool;
+ session->state = H2_PROXYS_ST_INIT;
+ session->window_bits_stream = window_bits_stream;
+ session->window_bits_connection = window_bits_connection;
+ session->streams = h2_ihash_create(pool, offsetof(h2_proxy_stream, id));
+ session->suspended = h2_iq_create(pool, 5);
+ session->done = done;
+
+ session->input = apr_brigade_create(session->pool, session->c->bucket_alloc);
+ session->output = apr_brigade_create(session->pool, session->c->bucket_alloc);
+
+ nghttp2_session_callbacks_new(&cbs);
+ nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv);
+ nghttp2_session_callbacks_set_on_data_chunk_recv_callback(cbs, on_data_chunk_recv);
+ nghttp2_session_callbacks_set_on_stream_close_callback(cbs, on_stream_close);
+ nghttp2_session_callbacks_set_on_header_callback(cbs, on_header);
+ nghttp2_session_callbacks_set_before_frame_send_callback(cbs, before_frame_send);
+ nghttp2_session_callbacks_set_send_callback(cbs, raw_send);
+
+ nghttp2_option_new(&option);
+ nghttp2_option_set_peer_max_concurrent_streams(option, 100);
+ nghttp2_option_set_no_auto_window_update(option, 1);
+
+ nghttp2_session_client_new2(&session->ngh2, cbs, session, option);
+
+ nghttp2_option_del(option);
+ nghttp2_session_callbacks_del(cbs);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362)
+ "setup session for %s", p_conn->hostname);
+ }
+ return p_conn->data;
+}
+
+static apr_status_t session_start(h2_proxy_session *session)
+{
+ nghttp2_settings_entry settings[2];
+ int rv, add_conn_window;
+ apr_socket_t *s;
+
+ s = ap_get_conn_socket(session->c);
+#if (!defined(WIN32) && !defined(NETWARE)) || defined(DOXYGEN)
+ if (s) {
+ ap_sock_disable_nagle(s);
+ }
+#endif
+
+ settings[0].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH;
+ settings[0].value = 0;
+ settings[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings[1].value = (1 << session->window_bits_stream) - 1;
+
+ rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE, settings,
+ H2_ALEN(settings));
+
+ /* If the connection window is larger than our default, trigger a WINDOW_UPDATE */
+ add_conn_window = ((1 << session->window_bits_connection) - 1 -
+ NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE);
+ if (!rv && add_conn_window != 0) {
+ rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE, 0, add_conn_window);
+ }
+ return rv? APR_EGENERAL : APR_SUCCESS;
+}
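+
+/* Sizing example (illustrative, with assumed values): the window_bits_*
+ * members are exponents, so window_bits_stream = 16 advertises an initial
+ * stream window of (1 << 16) - 1 = 65535 bytes (the HTTP/2 default), while
+ * window_bits_connection = 20 yields (1 << 20) - 1 = 1048575 bytes and thus
+ * a WINDOW_UPDATE of 1048575 - 65535 = 983040 bytes on stream 0 above. */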
+
+static apr_status_t open_stream(h2_proxy_session *session, const char *url,
+ request_rec *r, int standalone,
+ h2_proxy_stream **pstream)
+{
+ h2_proxy_stream *stream;
+ apr_uri_t puri;
+ const char *authority, *scheme, *path;
+ apr_status_t status;
+
+ stream = apr_pcalloc(r->pool, sizeof(*stream));
+
+ stream->pool = r->pool;
+ stream->url = url;
+ stream->r = r;
+ stream->standalone = standalone;
+ stream->session = session;
+ stream->state = H2_STREAM_ST_IDLE;
+
+ stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+ stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc);
+
+ stream->req = h2_req_create(1, stream->pool, 0);
+
+ status = apr_uri_parse(stream->pool, url, &puri);
+ if (status != APR_SUCCESS)
+ return status;
+
+ scheme = (strcmp(puri.scheme, "h2")? "http" : "https");
+ authority = puri.hostname;
+ if (!ap_strchr_c(authority, ':') && puri.port
+ && apr_uri_port_of_scheme(scheme) != puri.port) {
+ /* port info missing and port is not default for scheme: append */
+ authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
+ }
+ path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
+ h2_req_make(stream->req, stream->pool, r->method, scheme,
+ authority, path, r->headers_in);
+
+ /* Tuck away all already existing cookies */
+ stream->saves = apr_table_make(r->pool, 2);
+ apr_table_do(add_header, stream->saves, r->headers_out,"Set-Cookie", NULL);
+
+ *pstream = stream;
+
+ return APR_SUCCESS;
+}
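+
+/* Example (illustrative): for url "h2://backend.example.org:8443/api" the
+ * request is built with scheme "https" (only "h2" maps to https here, any
+ * other scheme maps to http), authority "backend.example.org:8443" (the
+ * port is appended because 8443 is not the default port for https), and
+ * path "/api". */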
+
+static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *stream)
+{
+ h2_ngheader *hd;
+ nghttp2_data_provider *pp = NULL;
+ nghttp2_data_provider provider;
+ int rv;
+ apr_status_t status;
+
+ hd = h2_util_ngheader_make_req(stream->pool, stream->req);
+
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ if ((status == APR_SUCCESS && !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(stream->input)))
+ || APR_STATUS_IS_EAGAIN(status)) {
+ /* there might be data coming */
+ provider.source.fd = 0;
+ provider.source.ptr = NULL;
+ provider.read_callback = stream_data_read;
+ pp = &provider;
+ }
+
+ rv = nghttp2_submit_request(session->ngh2, NULL,
+ hd->nv, hd->nvlen, pp, stream);
+
+ if (APLOGcdebug(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363)
+ "h2_proxy_session(%s): submit %s%s -> %d",
+ session->id, stream->req->authority, stream->req->path,
+ rv);
+ }
+
+ if (rv > 0) {
+ stream->id = rv;
+ stream->state = H2_STREAM_ST_OPEN;
+ h2_ihash_add(session->streams, stream);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_SUBMITTED, rv, NULL);
+
+ return APR_SUCCESS;
+ }
+ return APR_EGENERAL;
+}
+
+static apr_status_t feed_brigade(h2_proxy_session *session, apr_bucket_brigade *bb)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_size_t readlen = 0;
+ ssize_t n;
+
+ while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket* b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* nop */
+ }
+ else {
+ const char *bdata = NULL;
+ apr_size_t blen = 0;
+
+ status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
+ if (status == APR_SUCCESS && blen > 0) {
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)bdata, blen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): feeding %ld bytes -> %ld",
+ session->id, (long)blen, (long)n);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ status = APR_EGENERAL;
+ }
+ }
+ else {
+ readlen += n;
+ if (n < blen) {
+ apr_bucket_split(b, n);
+ }
+ }
+ }
+ }
+ apr_bucket_delete(b);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_proxy_session(%s): fed %ld bytes of input to session",
+ session->id, (long)readlen);
+ if (readlen == 0 && status == APR_SUCCESS) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+static apr_status_t h2_proxy_session_read(h2_proxy_session *session, int block,
+ apr_interval_time_t timeout)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (APR_BRIGADE_EMPTY(session->input)) {
+ apr_socket_t *socket = NULL;
+ apr_time_t save_timeout = -1;
+
+ if (block) {
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_get(socket, &save_timeout);
+ apr_socket_timeout_set(socket, timeout);
+ }
+ else {
+ /* cannot block on timeout */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03379)
+ "h2_proxy_session(%s): unable to get conn socket",
+ session->id);
+ return APR_ENOTIMPL;
+ }
+ }
+
+ status = ap_get_brigade(session->c->input_filters, session->input,
+ AP_MODE_READBYTES,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ,
+ 64 * 1024);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ "h2_proxy_session(%s): read from conn", session->id);
+ if (socket && save_timeout != -1) {
+ apr_socket_timeout_set(socket, save_timeout);
+ }
+ }
+
+ if (status == APR_SUCCESS) {
+ status = feed_brigade(session, session->input);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ /* nop */
+ }
+ else if (!APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03380)
+ "h2_proxy_session(%s): read error", session->id);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ }
+
+ return status;
+}
+
+apr_status_t h2_proxy_session_submit(h2_proxy_session *session,
+ const char *url, request_rec *r,
+ int standalone)
+{
+ h2_proxy_stream *stream;
+ apr_status_t status;
+
+ status = open_stream(session, url, r, standalone, &stream);
+ if (status == APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03381)
+ "process stream(%d): %s %s%s, original: %s",
+ stream->id, stream->req->method,
+ stream->req->authority, stream->req->path,
+ r->the_request);
+ status = submit_stream(session, stream);
+ }
+ return status;
+}
+
+static apr_status_t check_suspended(h2_proxy_session *session)
+{
+ h2_proxy_stream *stream;
+ int i, stream_id;
+ apr_status_t status;
+
+ for (i = 0; i < session->suspended->nelts; ++i) {
+ stream_id = session->suspended->elts[i];
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (stream) {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(stream->input)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ "h2_proxy_stream(%s-%d): resuming",
+ session->id, stream_id);
+ stream->suspended = 0;
+ h2_iq_remove(session->suspended, stream_id);
+ nghttp2_session_resume_data(session->ngh2, stream_id);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c,
+ APLOGNO(03382) "h2_proxy_stream(%s-%d): check input",
+ session->id, stream_id);
+ h2_iq_remove(session->suspended, stream_id);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ }
+ else {
+ /* gone? */
+ h2_iq_remove(session->suspended, stream_id);
+ check_suspended(session);
+ return APR_SUCCESS;
+ }
+ }
+ return APR_EAGAIN;
+}
+
+static apr_status_t session_shutdown(h2_proxy_session *session, int reason,
+ const char *msg)
+{
+ apr_status_t status = APR_SUCCESS;
+ const char *err = msg;
+
+ AP_DEBUG_ASSERT(session);
+ if (!err && reason) {
+ err = nghttp2_strerror(reason);
+ }
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0,
+ reason, (uint8_t*)err, err? strlen(err):0);
+ status = nghttp2_session_send(session->ngh2);
+ dispatch_event(session, H2_PROXYS_EV_LOCAL_GOAWAY, reason, err);
+ return status;
+}
+
+
+static const char *StateNames[] = {
+ "INIT", /* H2_PROXYS_ST_INIT */
+ "DONE", /* H2_PROXYS_ST_DONE */
+ "IDLE", /* H2_PROXYS_ST_IDLE */
+ "BUSY", /* H2_PROXYS_ST_BUSY */
+ "WAIT", /* H2_PROXYS_ST_WAIT */
+ "LSHUTDOWN", /* H2_PROXYS_ST_LOCAL_SHUTDOWN */
+ "RSHUTDOWN", /* H2_PROXYS_ST_REMOTE_SHUTDOWN */
+};
+
+static const char *state_name(h2_proxys_state state)
+{
+ if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
+ return "unknown";
+ }
+ return StateNames[state];
+}
+
+static int is_accepting_streams(h2_proxy_session *session)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_WAIT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static void transit(h2_proxy_session *session, const char *action,
+ h2_proxys_state nstate)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03345)
+ "h2_proxy_session(%s): transit [%s] -- %s --> [%s]", session->id,
+ state_name(session->state), action, state_name(nstate));
+ session->state = nstate;
+}
+
+static void ev_init(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ if (h2_ihash_empty(session->streams)) {
+ transit(session, "init", H2_PROXYS_ST_IDLE);
+ }
+ else {
+ transit(session, "init", H2_PROXYS_ST_BUSY);
+ }
+ break;
+
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_local_goaway(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* already did that? */
+ break;
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* all done */
+ transit(session, "local goaway", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ transit(session, "local goaway", H2_PROXYS_ST_LOCAL_SHUTDOWN);
+ break;
+ }
+}
+
+static void ev_remote_goaway(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* already received that? */
+ break;
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* all done */
+ transit(session, "remote goaway", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ transit(session, "remote goaway", H2_PROXYS_ST_REMOTE_SHUTDOWN);
+ break;
+ }
+}
+
+static void ev_conn_error(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "conn error", H2_PROXYS_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, arg, session->c,
+ "h2_proxy_session(%s): conn error -> shutdown", session->id);
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void ev_proto_error(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "proto error", H2_PROXYS_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): proto error -> shutdown", session->id);
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void ev_conn_timeout(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ transit(session, "conn timeout", H2_PROXYS_ST_DONE);
+ break;
+ default:
+ session_shutdown(session, arg, msg);
+ transit(session, "conn timeout", H2_PROXYS_ST_DONE);
+ break;
+ }
+}
+
+static void ev_no_io(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ /* nothing for input and output to do. If we remain
+ * in this state, we go into a tight loop and suck up
+ * CPU cycles. Ideally, we'd like to do a blocking read, but that
+ * is not possible if we have scheduled tasks and wait
+ * for them to produce something. */
+ if (h2_ihash_empty(session->streams)) {
+ if (!is_accepting_streams(session)) {
+ /* We are no longer accepting new streams and have
+ * finished processing existing ones. Time to leave. */
+ session_shutdown(session, arg, msg);
+ transit(session, "no io", H2_PROXYS_ST_DONE);
+ }
+ else {
+ /* When we have no streams, no task events are possible,
+ * switch to blocking reads */
+ transit(session, "no io", H2_PROXYS_ST_IDLE);
+ }
+ }
+ else {
+ /* Unable to do blocking reads, as we wait on events from
+ * task processing in other threads. Do a busy wait with
+ * backoff timer. */
+ transit(session, "no io", H2_PROXYS_ST_WAIT);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_submitted(h2_proxy_session *session, int stream_id,
+ const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "stream submitted", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_done(h2_proxy_session *session, int stream_id,
+ const char *msg)
+{
+ h2_proxy_stream *stream;
+
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (stream) {
+ int touched = (stream->data_sent ||
+ stream_id <= session->last_stream_id);
+ int complete = (stream->error_code == 0);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
+                      "h2_proxy_session(%s): stream(%d) closed "
+ "(complete=%d, touched=%d)",
+ session->id, stream_id, complete, touched);
+
+ if (complete && !stream->data_received) {
+ apr_bucket *b;
+ /* if the response had no body, this is the time to flush
+             * an empty brigade which will also "write" the response
+ * headers */
+ h2_proxy_stream_end_headers_out(stream);
+ stream->data_received = 1;
+ b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->output, b);
+ ap_pass_brigade(stream->r->output_filters, stream->output);
+ }
+
+ stream->state = H2_STREAM_ST_CLOSED;
+ h2_ihash_remove(session->streams, stream_id);
+ h2_iq_remove(session->suspended, stream_id);
+ if (session->done) {
+ session->done(session, stream->r, complete, touched);
+ }
+ }
+
+ switch (session->state) {
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_resumed(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "stream resumed", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_data_read(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_IDLE:
+ case H2_PROXYS_ST_WAIT:
+ transit(session, "data read", H2_PROXYS_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_ngh2_done(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ /* nop */
+ break;
+ default:
+ transit(session, "nghttp2 done", H2_PROXYS_ST_DONE);
+ break;
+ }
+}
+
+static void ev_pre_close(h2_proxy_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_PROXYS_ST_DONE:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ /* nop */
+ break;
+ default:
+ session_shutdown(session, arg, msg);
+ break;
+ }
+}
+
+static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
+ int arg, const char *msg)
+{
+ switch (ev) {
+ case H2_PROXYS_EV_INIT:
+ ev_init(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_LOCAL_GOAWAY:
+ ev_local_goaway(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_REMOTE_GOAWAY:
+ ev_remote_goaway(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_CONN_ERROR:
+ ev_conn_error(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_PROTO_ERROR:
+ ev_proto_error(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_CONN_TIMEOUT:
+ ev_conn_timeout(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_NO_IO:
+ ev_no_io(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_SUBMITTED:
+ ev_stream_submitted(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_DONE:
+ ev_stream_done(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_STREAM_RESUMED:
+ ev_stream_resumed(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_DATA_READ:
+ ev_data_read(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_NGH2_DONE:
+ ev_ngh2_done(session, arg, msg);
+ break;
+ case H2_PROXYS_EV_PRE_CLOSE:
+ ev_pre_close(session, arg, msg);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_proxy_session(%s): unknown event %d",
+ session->id, ev);
+ break;
+ }
+}
+
+apr_status_t h2_proxy_session_process(h2_proxy_session *session)
+{
+ apr_status_t status;
+ int have_written = 0, have_read = 0;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): process", session->id);
+
+run_loop:
+ switch (session->state) {
+ case H2_PROXYS_ST_INIT:
+ status = session_start(session);
+ if (status == APR_SUCCESS) {
+ dispatch_event(session, H2_PROXYS_EV_INIT, 0, NULL);
+ goto run_loop;
+ }
+ else {
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+ }
+ break;
+
+ case H2_PROXYS_ST_BUSY:
+ case H2_PROXYS_ST_LOCAL_SHUTDOWN:
+ case H2_PROXYS_ST_REMOTE_SHUTDOWN:
+ while (nghttp2_session_want_write(session->ngh2)) {
+ int rv = nghttp2_session_send(session->ngh2);
+ if (rv < 0 && nghttp2_is_fatal(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): write, rv=%d", session->id, rv);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL);
+ break;
+ }
+ have_written = 1;
+ }
+
+ if (nghttp2_session_want_read(session->ngh2)) {
+ status = h2_proxy_session_read(session, 0, 0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ }
+ }
+
+ if (!have_written && !have_read
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_PROXYS_EV_NO_IO, 0, NULL);
+ goto run_loop;
+ }
+ break;
+
+ case H2_PROXYS_ST_WAIT:
+ if (check_suspended(session) == APR_EAGAIN) {
+ /* no stream has become resumed. Do a blocking read with
+ * ever increasing timeouts... */
+ if (session->wait_timeout < 25) {
+ session->wait_timeout = 25;
+ }
+ else {
+ session->wait_timeout = H2MIN(apr_time_from_msec(100),
+ 2*session->wait_timeout);
+ }
+
+ status = h2_proxy_session_read(session, 1, session->wait_timeout);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ APLOGNO(03365)
+ "h2_proxy_session(%s): WAIT read, timeout=%fms",
+ session->id, (float)session->wait_timeout/1000.0);
+ if (status == APR_SUCCESS) {
+ have_read = 1;
+ dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)
+ || APR_STATUS_IS_EAGAIN(status)) {
+ /* go back to checking all inputs again */
+ transit(session, "wait cycle", H2_PROXYS_ST_BUSY);
+ }
+ }
+ break;
+
+ case H2_PROXYS_ST_IDLE:
+ break;
+
+ case H2_PROXYS_ST_DONE: /* done, session terminated */
+ return APR_EOF;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, session->c,
+ APLOGNO(03346)"h2_proxy_session(%s): unknown state %d",
+ session->id, session->state);
+ dispatch_event(session, H2_PROXYS_EV_PROTO_ERROR, 0, NULL);
+ break;
+ }
+
+
+ if (have_read || have_written) {
+ session->wait_timeout = 0;
+ }
+
+ if (!nghttp2_session_want_read(session->ngh2)
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_PROXYS_EV_NGH2_DONE, 0, NULL);
+ }
+
+ return APR_SUCCESS; /* needs to be called again */
+}
+
+typedef struct {
+ h2_proxy_session *session;
+ h2_proxy_request_done *done;
+} cleanup_iter_ctx;
+
+static int done_iter(void *udata, void *val)
+{
+ cleanup_iter_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+ int touched = (!ctx->session->last_stream_id ||
+ stream->id <= ctx->session->last_stream_id);
+ ctx->done(ctx->session, stream->r, 0, touched);
+ return 1;
+}
+
+void h2_proxy_session_cleanup(h2_proxy_session *session,
+ h2_proxy_request_done *done)
+{
+ if (session->streams && !h2_ihash_empty(session->streams)) {
+ cleanup_iter_ctx ctx;
+ ctx.session = session;
+ ctx.done = done;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03366)
+ "h2_proxy_session(%s): terminated, %d streams unfinished",
+ session->id, (int)h2_ihash_count(session->streams));
+ h2_ihash_iter(session->streams, done_iter, &ctx);
+ h2_ihash_clear(session->streams);
+ }
+}
+
+typedef struct {
+ h2_proxy_session *session;
+ conn_rec *c;
+ apr_off_t bytes;
+ int updated;
+} win_update_ctx;
+
+static int win_update_iter(void *udata, void *val)
+{
+ win_update_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+
+ if (stream->r && stream->r->connection == ctx->c) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c,
+ "h2_proxy_session(%s-%d): win_update %ld bytes",
+ ctx->session->id, (int)stream->id, (long)ctx->bytes);
+ nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes);
+ ctx->updated = 1;
+ return 0;
+ }
+ return 1;
+}
+
+
+void h2_proxy_session_update_window(h2_proxy_session *session,
+ conn_rec *c, apr_off_t bytes)
+{
+ if (session->streams && !h2_ihash_empty(session->streams)) {
+ win_update_ctx ctx;
+ ctx.session = session;
+ ctx.c = c;
+ ctx.bytes = bytes;
+ ctx.updated = 0;
+ h2_ihash_iter(session->streams, win_update_iter, &ctx);
+
+ if (!ctx.updated) {
+ /* could not find the stream any more, possibly closed, update
+ * the connection window at least */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): win_update conn %ld bytes",
+ session->id, (long)bytes);
+ nghttp2_session_consume_connection(session->ngh2, (size_t)bytes);
+ }
+ }
+}
+
diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h
new file mode 100644
index 00000000..7f0a1940
--- /dev/null
+++ b/modules/http2/h2_proxy_session.h
@@ -0,0 +1,111 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef h2_proxy_session_h
+#define h2_proxy_session_h
+
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+
+#include <nghttp2/nghttp2.h>
+
+struct h2_iqueue;
+struct h2_ihash_t;
+
+typedef enum {
+ H2_PROXYS_ST_INIT, /* send initial SETTINGS, etc. */
+ H2_PROXYS_ST_DONE, /* finished, connection close */
+ H2_PROXYS_ST_IDLE, /* no streams to process */
+ H2_PROXYS_ST_BUSY, /* read/write without stop */
+ H2_PROXYS_ST_WAIT, /* waiting for tasks reporting back */
+ H2_PROXYS_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */
+ H2_PROXYS_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */
+} h2_proxys_state;
+
+typedef enum {
+ H2_PROXYS_EV_INIT, /* session was initialized */
+ H2_PROXYS_EV_LOCAL_GOAWAY, /* we send a GOAWAY */
+    H2_PROXYS_EV_REMOTE_GOAWAY,    /* remote sent us a GOAWAY */
+ H2_PROXYS_EV_CONN_ERROR, /* connection error */
+ H2_PROXYS_EV_PROTO_ERROR, /* protocol error */
+ H2_PROXYS_EV_CONN_TIMEOUT, /* connection timeout */
+ H2_PROXYS_EV_NO_IO, /* nothing has been read or written */
+ H2_PROXYS_EV_STREAM_SUBMITTED, /* stream has been submitted */
+ H2_PROXYS_EV_STREAM_DONE, /* stream has been finished */
+ H2_PROXYS_EV_STREAM_RESUMED, /* stream signalled availability of headers/data */
+ H2_PROXYS_EV_DATA_READ, /* connection data has been read */
+    H2_PROXYS_EV_NGH2_DONE,        /* nghttp2 wants to neither read nor write anything */
+ H2_PROXYS_EV_PRE_CLOSE, /* connection will close after this */
+} h2_proxys_event_t;
+
+
+typedef struct h2_proxy_session h2_proxy_session;
+typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
+ int complete, int touched);
+
+struct h2_proxy_session {
+ const char *id;
+ conn_rec *c;
+ proxy_conn_rec *p_conn;
+ proxy_server_conf *conf;
+ apr_pool_t *pool;
+ nghttp2_session *ngh2; /* the nghttp2 session itself */
+
+ unsigned int aborted : 1;
+
+ h2_proxy_request_done *done;
+ void *user_data;
+
+ unsigned char window_bits_stream;
+ unsigned char window_bits_connection;
+
+ h2_proxys_state state;
+ apr_interval_time_t wait_timeout;
+
+ struct h2_ihash_t *streams;
+ struct h2_iqueue *suspended;
+ apr_size_t remote_max_concurrent;
+ int last_stream_id; /* last stream id processed by backend, or 0 */
+
+ apr_bucket_brigade *input;
+ apr_bucket_brigade *output;
+};
+
+h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
+ proxy_server_conf *conf,
+ unsigned char window_bits_connection,
+ unsigned char window_bits_stream,
+ h2_proxy_request_done *done);
+
+apr_status_t h2_proxy_session_submit(h2_proxy_session *s, const char *url,
+ request_rec *r, int standalone);
+
+/**
+ * Perform a step in processing the proxy session. Will return after
+ * one read/write cycle and indicate the session status via the return code.
+ * @param s the session to process
+ * @return APR_EAGAIN when processing needs to be invoked again
+ * APR_SUCCESS when all streams have been processed, session still live
+ * APR_EOF when the session has been terminated
+ */
+apr_status_t h2_proxy_session_process(h2_proxy_session *s);
+
+void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
+
+void h2_proxy_session_update_window(h2_proxy_session *s,
+ conn_rec *c, apr_off_t bytes);
+
+#define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url"
+
+#endif /* h2_proxy_session_h */
diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c
new file mode 100644
index 00000000..839f4a5a
--- /dev/null
+++ b/modules/http2/h2_proxy_util.c
@@ -0,0 +1,705 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_request.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2.h"
+#include "h2_proxy_util.h"
+
+/* h2_log2(n) iff n is a power of 2 */
+unsigned char h2_log2(apr_uint32_t n)
+{
+ int lz = 0;
+ if (!n) {
+ return 0;
+ }
+ if (!(n & 0xffff0000u)) {
+ lz += 16;
+ n = (n << 16);
+ }
+ if (!(n & 0xff000000u)) {
+ lz += 8;
+ n = (n << 8);
+ }
+ if (!(n & 0xf0000000u)) {
+ lz += 4;
+ n = (n << 4);
+ }
+ if (!(n & 0xc0000000u)) {
+ lz += 2;
+ n = (n << 2);
+ }
+ if (!(n & 0x80000000u)) {
+ lz += 1;
+ }
+
+ return 31 - lz;
+}
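+
+/* Worked examples (illustrative): h2_log2(1) == 0, h2_log2(256) == 8 and
+ * h2_log2(65536) == 16; for values that are not powers of 2 the result is
+ * the floor, e.g. h2_log2(100) == 6, and h2_log2(0) is defined as 0. */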
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+struct h2_ihash_t {
+ apr_hash_t *hash;
+ size_t ioff;
+};
+
+static unsigned int ihash(const char *key, apr_ssize_t *klen)
+{
+ return (unsigned int)(*((int*)key));
+}
+
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int)
+{
+ h2_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_ihash_t));
+ ih->hash = apr_hash_make_custom(pool, ihash);
+ ih->ioff = offset_of_int;
+ return ih;
+}
+
+size_t h2_ihash_count(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash);
+}
+
+int h2_ihash_empty(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash) == 0;
+}
+
+void *h2_ihash_get(h2_ihash_t *ih, int id)
+{
+ return apr_hash_get(ih->hash, &id, sizeof(id));
+}
+
+typedef struct {
+ h2_ihash_iter_t *iter;
+ void *ctx;
+} iter_ctx;
+
+static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen,
+ const void *val)
+{
+ iter_ctx *ictx = ctx;
+ return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/
+}
+
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx)
+{
+ iter_ctx ictx;
+ ictx.iter = fn;
+ ictx.ctx = ctx;
+ return apr_hash_do(ihash_iter, &ictx, ih->hash);
+}
+
+void h2_ihash_add(h2_ihash_t *ih, void *val)
+{
+ apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val);
+}
+
+void h2_ihash_remove(h2_ihash_t *ih, int id)
+{
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val)
+{
+ int id = *((int*)((char *)val + ih->ioff));
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+
+void h2_ihash_clear(h2_ihash_t *ih)
+{
+ apr_hash_clear(ih->hash);
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ void **buffer;
+ size_t max;
+ size_t len;
+} collect_ctx;
+
+static int collect_iter(void *x, void *val)
+{
+ collect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = val;
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max)
+{
+ collect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, collect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove_val(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ int *buffer;
+ size_t max;
+ size_t len;
+} icollect_ctx;
+
+static int icollect_iter(void *x, void *val)
+{
+ icollect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = *((int*)((char *)val + ctx->ih->ioff));
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max)
+{
+ icollect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, icollect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+/*******************************************************************************
+ * iqueue - sorted list of int
+ ******************************************************************************/
+
+static void iq_grow(h2_iqueue *q, int nlen);
+static void iq_swap(h2_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx);
+
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity)
+{
+ h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue));
+ if (q) {
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
+ }
+ return q;
+}
+
+int h2_iq_empty(h2_iqueue *q)
+{
+ return q->nelts == 0;
+}
+
+int h2_iq_count(h2_iqueue *q)
+{
+ return q->nelts;
+}
+
+
+void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
+{
+ int i;
+
+ if (q->nelts >= q->nalloc) {
+ iq_grow(q, q->nalloc * 2);
+ }
+
+ i = (q->head + q->nelts) % q->nalloc;
+ q->elts[i] = sid;
+ ++q->nelts;
+
+ if (cmp) {
+ /* bubble it to the front of the queue */
+ iq_bubble_up(q, i, q->head, cmp, ctx);
+ }
+}
+
+int h2_iq_remove(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ break;
+ }
+ }
+
+ if (i < q->nelts) {
+ ++i;
+ for (; i < q->nelts; ++i) {
+ q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
+ }
+ --q->nelts;
+ return 1;
+ }
+ return 0;
+}
+
+void h2_iq_clear(h2_iqueue *q)
+{
+ q->nelts = 0;
+}
+
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx)
+{
+ /* Assume that changes in ordering are minimal. This needs,
+     * best case, q->nelts - 1 comparisons to check that nothing
+ * changed.
+ */
+ if (q->nelts > 0) {
+ int i, ni, prev, last;
+
+ /* Start at the end of the queue and create a tail of sorted
+ * entries. Make that tail one element longer in each iteration.
+ */
+ last = i = (q->head + q->nelts - 1) % q->nalloc;
+ while (i != q->head) {
+ prev = (q->nalloc + i - 1) % q->nalloc;
+
+ ni = iq_bubble_up(q, i, prev, cmp, ctx);
+ if (ni == prev) {
+ /* i bubbled one up, bubble the new i down, which
+ * keeps all tasks below i sorted. */
+ iq_bubble_down(q, i, last, cmp, ctx);
+ }
+ i = prev;
+ };
+ }
+}
+
+
+int h2_iq_shift(h2_iqueue *q)
+{
+ int sid;
+
+ if (q->nelts <= 0) {
+ return 0;
+ }
+
+ sid = q->elts[q->head];
+ q->head = (q->head + 1) % q->nalloc;
+ q->nelts--;
+
+ return sid;
+}
+
+static void iq_grow(h2_iqueue *q, int nlen)
+{
+ if (nlen > q->nalloc) {
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
+ if (q->nelts > 0) {
+ int l = ((q->head + q->nelts) % q->nalloc) - q->head;
+
+ memmove(nq, q->elts + q->head, sizeof(int) * l);
+ if (l < q->nelts) {
+ /* elts wrapped, append elts in [0, remain] to nq */
+ int remain = q->nelts - l;
+ memmove(nq + l, q->elts, sizeof(int) * remain);
+ }
+ }
+ q->elts = nq;
+ q->nalloc = nlen;
+ q->head = 0;
+ }
+}
+
+static void iq_swap(h2_iqueue *q, int i, int j)
+{
+ int x = q->elts[i];
+ q->elts[i] = q->elts[j];
+ q->elts[j] = x;
+}
+
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int prev;
+ while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
+ && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
+ iq_swap(q, prev, i);
+ i = prev;
+ }
+ return i;
+}
+
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int next;
+ while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
+ && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
+ iq_swap(q, next, i);
+ i = next;
+ }
+ return i;
+}
+
+/*******************************************************************************
+ * h2_ngheader
+ ******************************************************************************/
+#define H2_HD_MATCH_LIT_CS(l, name) \
+ ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+static int h2_util_ignore_header(const char *name)
+{
+ /* never forward, ch. 8.1.2.2 */
+ return (H2_HD_MATCH_LIT_CS("connection", name)
+ || H2_HD_MATCH_LIT_CS("proxy-connection", name)
+ || H2_HD_MATCH_LIT_CS("upgrade", name)
+ || H2_HD_MATCH_LIT_CS("keep-alive", name)
+ || H2_HD_MATCH_LIT_CS("transfer-encoding", name));
+}
+
+static int count_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ (*((size_t*)ctx))++;
+ }
+ return 1;
+}
+
+#define NV_ADD_LIT_CS(nv, k, v) add_header(nv, k, sizeof(k) - 1, v, strlen(v))
+#define NV_ADD_CS_CS(nv, k, v) add_header(nv, k, strlen(k), v, strlen(v))
+
+static int add_header(h2_ngheader *ngh,
+ const char *key, size_t key_len,
+ const char *value, size_t val_len)
+{
+ nghttp2_nv *nv = &ngh->nv[ngh->nvlen++];
+
+ nv->name = (uint8_t*)key;
+ nv->namelen = key_len;
+ nv->value = (uint8_t*)value;
+ nv->valuelen = val_len;
+ return 1;
+}
+
+static int add_table_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ add_header(ctx, key, strlen(key), value, strlen(value));
+ }
+ return 1;
+}
+
+h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
+ const struct h2_request *req)
+{
+
+ h2_ngheader *ngh;
+ size_t n;
+
+ AP_DEBUG_ASSERT(req);
+ AP_DEBUG_ASSERT(req->scheme);
+ AP_DEBUG_ASSERT(req->authority);
+ AP_DEBUG_ASSERT(req->path);
+ AP_DEBUG_ASSERT(req->method);
+
+ n = 4;
+ apr_table_do(count_header, &n, req->headers, NULL);
+
+ ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ NV_ADD_LIT_CS(ngh, ":scheme", req->scheme);
+ NV_ADD_LIT_CS(ngh, ":authority", req->authority);
+ NV_ADD_LIT_CS(ngh, ":path", req->path);
+ NV_ADD_LIT_CS(ngh, ":method", req->method);
+ apr_table_do(add_table_header, ngh, req->headers, NULL);
+
+ return ngh;
+}
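+
+/* Example (illustrative): the resulting nv array starts with the four pseudo
+ * header fields ":scheme", ":authority", ":path" and ":method" in that order,
+ * followed by the entries of req->headers minus connection-class fields such
+ * as Connection, Keep-Alive, Upgrade and Transfer-Encoding. */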
+
+/*******************************************************************************
+ * header HTTP/1 <-> HTTP/2 conversions
+ ******************************************************************************/
+
+typedef struct {
+ const char *name;
+ size_t len;
+} literal;
+
+#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) }
+#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
+
+static literal IgnoredRequestHeaders[] = {
+ H2_DEF_LITERAL("expect"),
+ H2_DEF_LITERAL("upgrade"),
+ H2_DEF_LITERAL("connection"),
+ H2_DEF_LITERAL("keep-alive"),
+ H2_DEF_LITERAL("http2-settings"),
+ H2_DEF_LITERAL("proxy-connection"),
+ H2_DEF_LITERAL("transfer-encoding"),
+};
+static literal IgnoredProxyRespHds[] = {
+ H2_DEF_LITERAL("alt-svc"),
+};
+
+static int ignore_header(const literal *lits, size_t llen,
+ const char *name, size_t nlen)
+{
+ const literal *lit;
+ int i;
+
+ for (i = 0; i < llen; ++i) {
+ lit = &lits[i];
+ if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int h2_req_ignore_header(const char *name, size_t len)
+{
+ return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len);
+}
+
+int h2_proxy_res_ignore_header(const char *name, size_t len)
+{
+ return (h2_req_ignore_header(name, len)
+ || ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len));
+}
+
+void h2_util_camel_case_header(char *s, size_t len)
+{
+ size_t start = 1;
+ size_t i;
+ for (i = 0; i < len; ++i) {
+ if (start) {
+ if (s[i] >= 'a' && s[i] <= 'z') {
+ s[i] -= 'a' - 'A';
+ }
+
+ start = 0;
+ }
+ else if (s[i] == '-') {
+ start = 1;
+ }
+ }
+}
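+
+/* Example (illustrative): applied in place, "content-type" becomes
+ * "Content-Type" and "x-forwarded-for" becomes "X-Forwarded-For"; only the
+ * first character of each '-'-separated token is modified. */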
+
+/*******************************************************************************
+ * h2 request handling
+ ******************************************************************************/
+
+/** Match a header name against a string constant, case insensitive */
+#define H2_HD_MATCH_LIT(l, name, nlen) \
+ ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+static apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ char *hname, *hvalue;
+
+ if (h2_req_ignore_header(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
+ const char *existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ char *nval;
+
+            /* Cookie headers arrive as separate fields in HTTP/2, but need
+             * to be merged with "; " (instead of the default ", ")
+ */
+ hvalue = apr_pstrndup(pool, value, vlen);
+ nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
+ apr_table_setn(headers, "Cookie", nval);
+ return APR_SUCCESS;
+ }
+ }
+ else if (H2_HD_MATCH_LIT("host", name, nlen)) {
+ if (apr_table_get(headers, "Host")) {
+ return APR_SUCCESS; /* ignore duplicate */
+ }
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+ hvalue = apr_pstrndup(pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
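+
+/* Example (illustrative): HTTP/2 may deliver "cookie: a=1" and "cookie: b=2"
+ * as separate header fields; the code above folds them into a single
+ * "Cookie: a=1; b=2" entry, since HTTP/1.1 recombines Cookie values with
+ * "; " rather than the default ", " list separator. */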
+
+static h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header,
+ int serialize)
+{
+ h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
+
+ req->id = id;
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+ req->serialize = serialize;
+
+ return req;
+}
+
+h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize)
+{
+ return h2_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
+}
+
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
+{
+ h1_ctx *x = ctx;
+ size_t klen = strlen(key);
+ if (!h2_req_ignore_header(key, klen)) {
+ h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value));
+ }
+ return 1;
+}
+
+apr_status_t h2_req_make(h2_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers)
+{
+ h1_ctx x;
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+
+ AP_DEBUG_ASSERT(req->scheme);
+ AP_DEBUG_ASSERT(req->authority);
+ AP_DEBUG_ASSERT(req->path);
+ AP_DEBUG_ASSERT(req->method);
+
+ x.pool = pool;
+ x.headers = req->headers;
+ apr_table_do(set_h1_header, &x, headers, NULL);
+ return APR_SUCCESS;
+}
+
+/*******************************************************************************
+ * frame logging
+ ******************************************************************************/
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
+{
+ char scratch[128];
+ size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
+
+ switch (frame->hd.type) {
+ case NGHTTP2_DATA: {
+ return apr_snprintf(buffer, maxlen,
+ "DATA[length=%d, flags=%d, stream=%d, padlen=%d]",
+ (int)frame->hd.length, frame->hd.flags,
+ frame->hd.stream_id, (int)frame->data.padlen);
+ }
+ case NGHTTP2_HEADERS: {
+ return apr_snprintf(buffer, maxlen,
+ "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM));
+ }
+ case NGHTTP2_PRIORITY: {
+ return apr_snprintf(buffer, maxlen,
+ "PRIORITY[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_RST_STREAM: {
+ return apr_snprintf(buffer, maxlen,
+ "RST_STREAM[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_SETTINGS: {
+ if (frame->hd.flags & NGHTTP2_FLAG_ACK) {
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[ack=1, stream=%d]",
+ frame->hd.stream_id);
+ }
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[length=%d, stream=%d]",
+ (int)frame->hd.length, frame->hd.stream_id);
+ }
+ case NGHTTP2_PUSH_PROMISE: {
+ return apr_snprintf(buffer, maxlen,
+ "PUSH_PROMISE[length=%d, hend=%d, stream=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_PING: {
+ return apr_snprintf(buffer, maxlen,
+ "PING[length=%d, ack=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags&NGHTTP2_FLAG_ACK,
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_GOAWAY: {
+ size_t len = (frame->goaway.opaque_data_len < s_len)?
+ frame->goaway.opaque_data_len : s_len-1;
+ memcpy(scratch, frame->goaway.opaque_data, len);
+ scratch[len] = '\0';
+ return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
+ "last_stream=%d]", frame->goaway.error_code,
+ scratch, frame->goaway.last_stream_id);
+ }
+ case NGHTTP2_WINDOW_UPDATE: {
+ return apr_snprintf(buffer, maxlen,
+ "WINDOW_UPDATE[stream=%d, incr=%d]",
+ frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ }
+ default:
+ return apr_snprintf(buffer, maxlen,
+ "type=%d[length=%d, flags=%d, stream=%d]",
+ frame->hd.type, (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+}
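+
+/* Example output (illustrative): for a 57 byte HEADERS frame with
+ * END_HEADERS set on stream 1, the buffer reads
+ * "HEADERS[length=57, hend=1, stream=1, eos=0]". */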
diff --git a/modules/http2/h2_proxy_util.h b/modules/http2/h2_proxy_util.h
new file mode 100644
index 00000000..98f297fa
--- /dev/null
+++ b/modules/http2/h2_proxy_util.h
@@ -0,0 +1,181 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_proxy_util__
+#define __mod_h2__h2_proxy_util__
+
+/*******************************************************************************
+ * some debugging/format helpers
+ ******************************************************************************/
+struct h2_request;
+struct nghttp2_frame;
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+typedef struct h2_ihash_t h2_ihash_t;
+typedef int h2_ihash_iter_t(void *ctx, void *val);
+
+/**
+ * Create a hash for structures that have an identifying int member.
+ * @param pool the pool to use
+ * @param offset_of_int the offsetof() the int member in the struct
+ */
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
+
+size_t h2_ihash_count(h2_ihash_t *ih);
+int h2_ihash_empty(h2_ihash_t *ih);
+void *h2_ihash_get(h2_ihash_t *ih, int id);
+
+/**
+ * Iterate over the hash members (without defined order) and invoke
+ * fn for each member until 0 is returned.
+ * @param ih the hash to iterate over
+ * @param fn the function to invoke on each member
+ * @param ctx user supplied data passed into each iteration call
+ * @return 0 if one iteration returned 0, otherwise != 0
+ */
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx);
+
+void h2_ihash_add(h2_ihash_t *ih, void *val);
+void h2_ihash_remove(h2_ihash_t *ih, int id);
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val);
+void h2_ihash_clear(h2_ihash_t *ih);
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max);
+size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max);
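+
+/* Usage sketch (illustrative; h2_proxy_stream and the local variables are
+ * assumptions taken from the surrounding module). Entries are keyed by the
+ * int member whose offset was given at create time:
+ *
+ *   h2_ihash_t *streams = h2_ihash_create(pool, offsetof(h2_proxy_stream, id));
+ *   h2_ihash_add(streams, stream);
+ *   h2_proxy_stream *s = h2_ihash_get(streams, stream->id);
+ *   h2_ihash_remove(streams, stream->id);
+ */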
+
+/*******************************************************************************
+ * iqueue - sorted list of int with user defined ordering
+ ******************************************************************************/
+typedef struct h2_iqueue {
+ int *elts;
+ int head;
+ int nelts;
+ int nalloc;
+ apr_pool_t *pool;
+} h2_iqueue;
+
+/**
+ * Comparator for two int to determine their order.
+ *
+ * @param i1 first int to compare
+ * @param i2 second int to compare
+ * @param ctx provided user data
+ * @return value is the same as for strcmp() and has the effect:
+ * == 0: s1 and s2 are treated equal in ordering
+ * < 0: s1 should be sorted before s2
+ * > 0: s2 should be sorted before s1
+ */
+typedef int h2_iq_cmp(int i1, int i2, void *ctx);
+
+/**
+ * Allocate a new queue from the pool and initialize.
+ * @param pool the memory pool
+ * @param capacity the initial capacity of the queue
+ */
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity);
+
+/**
+ * Return != 0 iff there are no tasks in the queue.
+ * @param q the queue to check
+ */
+int h2_iq_empty(h2_iqueue *q);
+
+/**
+ * Return the number of int in the queue.
+ * @param q the queue to get size on
+ */
+int h2_iq_count(h2_iqueue *q);
+
+/**
+ * Add a stream id to the queue.
+ *
+ * @param q the queue to append the stream id to
+ * @param sid the stream id to add
+ * @param cmp the comparator for sorting
+ * @param ctx user data for comparator
+ */
+void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Remove the stream id from the queue. Return != 0 iff the
+ * stream id was found in the queue.
+ * @param q the queue
+ * @param sid the stream id to remove
+ * @return != 0 iff the stream id was found in the queue
+ */
+int h2_iq_remove(h2_iqueue *q, int sid);
+
+/**
+ * Remove all entries in the queue.
+ */
+void h2_iq_clear(h2_iqueue *q);
+
+/**
+ * Sort the stream id queue again. Call this if the ordering
+ * of the queued ids has changed.
+ *
+ * @param q the queue to sort
+ * @param cmp the comparator for sorting
+ * @param ctx user data for the comparator
+ */
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Get the first stream id from the queue or 0 if the queue is empty.
+ * The id is removed from the queue.
+ *
+ * @param q the queue to get the first stream id from
+ * @return the first stream id of the queue, 0 if empty
+ */
+int h2_iq_shift(h2_iqueue *q);
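+
+/* Illustrative usage sketch (not part of the module): keeping stream ids in
+ * an order defined by a caller supplied comparator, assuming a pool 'p':
+ *
+ *     static int by_value(int i1, int i2, void *ctx) {
+ *         (void)ctx;
+ *         return i1 - i2;                  (plain ascending order)
+ *     }
+ *     h2_iqueue *q = h2_iq_create(p, 16);
+ *     h2_iq_add(q, 5, by_value, NULL);
+ *     h2_iq_add(q, 3, by_value, NULL);
+ *     int first = h2_iq_shift(q);          (yields 3 and removes it)
+ */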
+
+/*******************************************************************************
+ * common helpers
+ ******************************************************************************/
+/* Return log2(n); the result is only exact if n is a power of 2 */
+unsigned char h2_log2(apr_uint32_t n);
+
+/*******************************************************************************
+ * HTTP/2 header helpers
+ ******************************************************************************/
+void h2_util_camel_case_header(char *s, size_t len);
+int h2_proxy_res_ignore_header(const char *name, size_t len);
+
+/*******************************************************************************
+ * nghttp2 helpers
+ ******************************************************************************/
+typedef struct h2_ngheader {
+ nghttp2_nv *nv;
+ apr_size_t nvlen;
+} h2_ngheader;
+h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
+ const struct h2_request *req);
+
+/*******************************************************************************
+ * h2_request helpers
+ ******************************************************************************/
+struct h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize);
+apr_status_t h2_req_make(struct h2_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers);
+
+
+
+#endif /* defined(__mod_h2__h2_proxy_util__) */
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
new file mode 100644
index 00000000..977fab58
--- /dev/null
+++ b/modules/http2/h2_push.c
@@ -0,0 +1,1053 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_hash.h>
+#include <apr_time.h>
+
+#ifdef H2_OPENSSL
+#include <openssl/sha.h>
+#endif
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_h2.h"
+#include "h2_util.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_response.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+
+/*******************************************************************************
+ * link header handling
+ ******************************************************************************/
+
+static const char *policy_str(h2_push_policy policy)
+{
+ switch (policy) {
+ case H2_PUSH_NONE:
+ return "none";
+ case H2_PUSH_FAST_LOAD:
+ return "fast-load";
+ case H2_PUSH_HEAD:
+ return "head";
+ default:
+ return "default";
+ }
+}
+
+typedef struct {
+ const h2_request *req;
+ apr_pool_t *pool;
+ apr_array_header_t *pushes;
+ const char *s;
+ size_t slen;
+ size_t i;
+
+ const char *link;
+ apr_table_t *params;
+ char b[4096];
+} link_ctx;
+
+static int attr_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '+':
+ case '-':
+ case '.':
+ case '^':
+ case '_':
+ case '`':
+ case '|':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int ptoken_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '\'':
+ case '(':
+ case ')':
+ case '*':
+ case '+':
+ case '-':
+ case '.':
+ case '/':
+ case ':':
+ case '<':
+ case '=':
+ case '>':
+ case '?':
+ case '@':
+ case '[':
+ case ']':
+ case '^':
+ case '_':
+ case '`':
+ case '{':
+ case '|':
+ case '}':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int skip_ws(link_ctx *ctx)
+{
+ char c;
+ while (ctx->i < ctx->slen
+ && (((c = ctx->s[ctx->i]) == ' ') || (c == '\t'))) {
+ ++ctx->i;
+ }
+ return (ctx->i < ctx->slen);
+}
+
+static int find_chr(link_ctx *ctx, char c, size_t *pidx)
+{
+ size_t j;
+ for (j = ctx->i; j < ctx->slen; ++j) {
+ if (ctx->s[j] == c) {
+ *pidx = j;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_chr(link_ctx *ctx, char c)
+{
+ if (ctx->i < ctx->slen && ctx->s[ctx->i] == c) {
+ ++ctx->i;
+ return 1;
+ }
+ return 0;
+}
+
+static char *mk_str(link_ctx *ctx, size_t end)
+{
+ if (ctx->i < end) {
+ return apr_pstrndup(ctx->pool, ctx->s + ctx->i, end - ctx->i);
+ }
+ return "";
+}
+
+static int read_qstring(link_ctx *ctx, char **ps)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '\"')) {
+ size_t end;
+ if (find_chr(ctx, '\"', &end)) {
+ *ps = mk_str(ctx, end);
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_ptoken(link_ctx *ctx, char **ps)
+{
+ if (skip_ws(ctx)) {
+ size_t i;
+ for (i = ctx->i; i < ctx->slen && ptoken_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ *ps = mk_str(ctx, i);
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+static int read_link(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '<')) {
+ size_t end;
+ if (find_chr(ctx, '>', &end)) {
+ ctx->link = mk_str(ctx, end);
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_pname(link_ctx *ctx, char **pname)
+{
+ if (skip_ws(ctx)) {
+ size_t i;
+ for (i = ctx->i; i < ctx->slen && attr_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ *pname = mk_str(ctx, i);
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_pvalue(link_ctx *ctx, char **pvalue)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '=')) {
+ if (read_qstring(ctx, pvalue) || read_ptoken(ctx, pvalue)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_param(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ';')) {
+ char *name, *value = "";
+ if (read_pname(ctx, &name)) {
+ read_pvalue(ctx, &value); /* value is optional */
+ apr_table_setn(ctx->params, name, value);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_sep(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ',')) {
+ return 1;
+ }
+ return 0;
+}
+
+static void init_params(link_ctx *ctx)
+{
+ if (!ctx->params) {
+ ctx->params = apr_table_make(ctx->pool, 5);
+ }
+ else {
+ apr_table_clear(ctx->params);
+ }
+}
+
+static int same_authority(const h2_request *req, const apr_uri_t *uri)
+{
+ if (uri->scheme != NULL && strcmp(uri->scheme, req->scheme)) {
+ return 0;
+ }
+ if (uri->hostinfo != NULL && strcmp(uri->hostinfo, req->authority)) {
+ return 0;
+ }
+ return 1;
+}
+
+static int set_push_header(void *ctx, const char *key, const char *value)
+{
+ size_t klen = strlen(key);
+ if (H2_HD_MATCH_LIT("User-Agent", key, klen)
+ || H2_HD_MATCH_LIT("Accept", key, klen)
+ || H2_HD_MATCH_LIT("Accept-Encoding", key, klen)
+ || H2_HD_MATCH_LIT("Accept-Language", key, klen)
+ || H2_HD_MATCH_LIT("Cache-Control", key, klen)) {
+ apr_table_setn(ctx, key, value);
+ }
+ return 1;
+}
+
+static int has_param(link_ctx *ctx, const char *param)
+{
+ const char *p = apr_table_get(ctx->params, param);
+ return !!p;
+}
+
+static int has_relation(link_ctx *ctx, const char *rel)
+{
+ const char *s, *val = apr_table_get(ctx->params, "rel");
+ if (val) {
+ if (!strcmp(rel, val)) {
+ return 1;
+ }
+ s = ap_strstr_c(val, rel);
+ if (s && (s == val || s[-1] == ' ')) {
+ s += strlen(rel);
+ if (!*s || *s == ' ') {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int add_push(link_ctx *ctx)
+{
+ /* so, we have read a Link header and need to decide
+ * if we transform it into a push.
+ */
+ if (has_relation(ctx, "preload") && !has_param(ctx, "nopush")) {
+ apr_uri_t uri;
+ if (apr_uri_parse(ctx->pool, ctx->link, &uri) == APR_SUCCESS) {
+ if (uri.path && same_authority(ctx->req, &uri)) {
+ char *path;
+ const char *method;
+ apr_table_t *headers;
+ h2_request *req;
+ h2_push *push;
+
+                    /* We only want to generate pushes for resources in the
+                     * same authority as the original request.
+                     * icing: I think that is wise; otherwise we would really
+                     * need to check that the vhost/server is available and
+                     * uses the same TLS (if any) parameters.
+                     */
+ path = apr_uri_unparse(ctx->pool, &uri, APR_URI_UNP_OMITSITEPART);
+ push = apr_pcalloc(ctx->pool, sizeof(*push));
+ switch (ctx->req->push_policy) {
+ case H2_PUSH_HEAD:
+ method = "HEAD";
+ break;
+ default:
+ method = "GET";
+ break;
+ }
+ headers = apr_table_make(ctx->pool, 5);
+ apr_table_do(set_push_header, headers, ctx->req->headers, NULL);
+ req = h2_req_createn(0, ctx->pool, method, ctx->req->scheme,
+ ctx->req->authority, path, headers,
+ ctx->req->serialize);
+ /* atm, we do not push on pushes */
+ h2_request_end_headers(req, ctx->pool, 1, 0);
+ push->req = req;
+
+ if (!ctx->pushes) {
+ ctx->pushes = apr_array_make(ctx->pool, 5, sizeof(h2_push*));
+ }
+ APR_ARRAY_PUSH(ctx->pushes, h2_push*) = push;
+ }
+ }
+ }
+ return 0;
+}
+
+static void inspect_link(link_ctx *ctx, const char *s, size_t slen)
+{
+ /* RFC 5988 <https://tools.ietf.org/html/rfc5988#section-6.2.1>
+ Link = "Link" ":" #link-value
+ link-value = "<" URI-Reference ">" *( ";" link-param )
+ link-param = ( ( "rel" "=" relation-types )
+ | ( "anchor" "=" <"> URI-Reference <"> )
+ | ( "rev" "=" relation-types )
+ | ( "hreflang" "=" Language-Tag )
+ | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) )
+ | ( "title" "=" quoted-string )
+ | ( "title*" "=" ext-value )
+ | ( "type" "=" ( media-type | quoted-mt ) )
+ | ( link-extension ) )
+ link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] )
+ | ( ext-name-star "=" ext-value )
+ ext-name-star = parmname "*" ; reserved for RFC2231-profiled
+ ; extensions. Whitespace NOT
+ ; allowed in between.
+ ptoken = 1*ptokenchar
+ ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "("
+ | ")" | "*" | "+" | "-" | "." | "/" | DIGIT
+ | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA
+ | "[" | "]" | "^" | "_" | "`" | "{" | "|"
+ | "}" | "~"
+ media-type = type-name "/" subtype-name
+ quoted-mt = <"> media-type <">
+ relation-types = relation-type
+ | <"> relation-type *( 1*SP relation-type ) <">
+ relation-type = reg-rel-type | ext-rel-type
+ reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" )
+ ext-rel-type = URI
+
+ and from <https://tools.ietf.org/html/rfc5987>
+ parmname = 1*attr-char
+ attr-char = ALPHA / DIGIT
+ / "!" / "#" / "$" / "&" / "+" / "-" / "."
+ / "^" / "_" / "`" / "|" / "~"
+ */
+
+ ctx->s = s;
+ ctx->slen = slen;
+ ctx->i = 0;
+
+ while (read_link(ctx)) {
+ init_params(ctx);
+ while (read_param(ctx)) {
+ /* nop */
+ }
+ add_push(ctx);
+ if (!read_sep(ctx)) {
+ break;
+ }
+ }
+}
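+
+/* Illustrative example (not taken from a real response): a response header
+ *     Link: </assets/site.css>; rel=preload, </other.js>; rel=preload; nopush
+ * yields two link-values; add_push() turns the first into a push candidate
+ * (relative URI, rel=preload, no 'nopush' param) and skips the second
+ * because of its 'nopush' parameter.
+ */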
+
+static int head_iter(void *ctx, const char *key, const char *value)
+{
+ if (!apr_strnatcasecmp("link", key)) {
+ inspect_link(ctx, value, strlen(value));
+ }
+ return 1;
+}
+
+apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
+ const h2_response *res)
+{
+ if (req && req->push_policy != H2_PUSH_NONE) {
+ /* Collect push candidates from the request/response pair.
+ *
+ * One source for pushes are "rel=preload" link headers
+ * in the response.
+ *
+ * TODO: This may be extended in the future by hooks or callbacks
+ * where other modules can provide push information directly.
+ */
+ if (res->headers) {
+ link_ctx ctx;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.req = req;
+ ctx.pool = p;
+
+ apr_table_do(head_iter, &ctx, res->headers, NULL);
+ if (ctx.pushes) {
+ apr_table_setn(res->headers, "push-policy", policy_str(req->push_policy));
+ }
+ return ctx.pushes;
+ }
+ }
+ return NULL;
+}
+
+/*******************************************************************************
+ * push diary
+ *
+ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
+ * connection. It records a hash value from the absolute URL of the resource
+ * pushed.
+ * - Lacking openssl, it uses 'apr_hashfunc_default' for the value
+ * - with openssl, it uses SHA256 to calculate the hash value
+ * - whatever the method to generate the hash, the diary keeps a maximum of 64
+ * bits per hash, limiting the memory consumption to about
+ * H2PushDiarySize * 8
+ * bytes. Entries are sorted by most recently used and oldest entries are
+ * forgotten first.
+ * - Clients can initialize/replace the push diary by sending a 'Cache-Digest'
+ * header. Currently, this is the base64url encoded value of the cache digest
+ * as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * This draft can be expected to evolve and the definition of the header
+ * will be added there and refined.
+ * - The cache digest header is a Golomb Coded Set of hash values, but it may
+ *   limit the number of bits per hash value even further. For a good description
+ *   of GCS, read here:
+ *   http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters
+ * - This means that the push diary might be initialized with hash values of much
+ *   less than 64 bits, leading to more false positives, but a smaller digest size.
+ ******************************************************************************/
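+
+/* A rough feel for the numbers (illustrative only): with H2PushDiarySize
+ * set to 256, the diary holds at most 256 entries of a 64 bit hash each,
+ * i.e. about 256 * 8 = 2048 bytes per connection. A client supplied digest
+ * with log2(P) = 7 leaves only log2(N) + 7 relevant bits per hash, trading
+ * some false positives for a much shorter digest.
+ */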
+
+
+#define GCSLOG_LEVEL APLOG_TRACE1
+
+typedef struct h2_push_diary_entry {
+ apr_uint64_t hash;
+} h2_push_diary_entry;
+
+
+#ifdef H2_OPENSSL
+static void sha256_update(SHA256_CTX *ctx, const char *s)
+{
+ SHA256_Update(ctx, s, strlen(s));
+}
+
+static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
+{
+ SHA256_CTX sha256;
+ apr_uint64_t val;
+ unsigned char hash[SHA256_DIGEST_LENGTH];
+ int i;
+
+ SHA256_Init(&sha256);
+ sha256_update(&sha256, push->req->scheme);
+ sha256_update(&sha256, "://");
+ sha256_update(&sha256, push->req->authority);
+ sha256_update(&sha256, push->req->path);
+ SHA256_Final(hash, &sha256);
+
+ val = 0;
+ for (i = 0; i != sizeof(val); ++i)
+ val = val * 256 + hash[i];
+ *phash = val >> (64 - diary->mask_bits);
+}
+#endif
+
+
+static unsigned int val_apr_hash(const char *str)
+{
+ apr_ssize_t len = strlen(str);
+ return apr_hashfunc_default(str, &len);
+}
+
+static void calc_apr_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
+{
+ apr_uint64_t val;
+#if APR_UINT64MAX > APR_UINT_MAX
+ val = (val_apr_hash(push->req->scheme) << 32);
+ val ^= (val_apr_hash(push->req->authority) << 16);
+ val ^= val_apr_hash(push->req->path);
+#else
+ val = val_apr_hash(push->req->scheme);
+ val ^= val_apr_hash(push->req->authority);
+ val ^= val_apr_hash(push->req->path);
+#endif
+ *phash = val;
+}
+
+static apr_int32_t ceil_power_of_2(apr_int32_t n)
+{
+ if (n <= 2) return 2;
+ --n;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ return ++n;
+}
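+
+/* For illustration: ceil_power_of_2(1) == 2, ceil_power_of_2(64) == 64,
+ * ceil_power_of_2(100) == 128. */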
+
+static h2_push_diary *diary_create(apr_pool_t *p, h2_push_digest_type dtype,
+ apr_size_t N)
+{
+ h2_push_diary *diary = NULL;
+
+ if (N > 0) {
+ diary = apr_pcalloc(p, sizeof(*diary));
+
+ diary->NMax = ceil_power_of_2(N);
+ diary->N = diary->NMax;
+        /* the mask we use in value comparison depends on where we got
+         * the values from. If we calculate them ourselves, we can use
+         * the full 64 bits.
+         * If we set the diary via a compressed Golomb set, we have fewer
+         * relevant bits and need to use a smaller mask. */
+ diary->mask_bits = 64;
+ /* grows by doubling, start with a power of 2 */
+ diary->entries = apr_array_make(p, 16, sizeof(h2_push_diary_entry));
+
+ switch (dtype) {
+#ifdef H2_OPENSSL
+ case H2_PUSH_DIGEST_SHA256:
+ diary->dtype = H2_PUSH_DIGEST_SHA256;
+ diary->dcalc = calc_sha256_hash;
+ break;
+#endif /* ifdef H2_OPENSSL */
+ default:
+ diary->dtype = H2_PUSH_DIGEST_APR_HASH;
+ diary->dcalc = calc_apr_hash;
+ break;
+ }
+ }
+
+ return diary;
+}
+
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, apr_size_t N)
+{
+ return diary_create(p, H2_PUSH_DIGEST_SHA256, N);
+}
+
+static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash)
+{
+ if (diary) {
+ h2_push_diary_entry *e;
+ int i;
+
+ /* search from the end, where the last accessed digests are */
+ for (i = diary->entries->nelts-1; i >= 0; --i) {
+ e = &APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry);
+ if (e->hash == hash) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx)
+{
+ h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+ h2_push_diary_entry e;
+ apr_size_t lastidx = diary->entries->nelts-1;
+
+ /* move entry[idx] to the end */
+ if (idx < lastidx) {
+ e = entries[idx];
+ memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx));
+ entries[lastidx] = e;
+ }
+ return &entries[lastidx];
+}
+
+static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
+{
+ h2_push_diary_entry *ne;
+
+ if (diary->entries->nelts < diary->N) {
+ /* append a new diary entry at the end */
+ APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
+ ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry);
+ }
+ else {
+ /* replace content with new digest. keeps memory usage constant once diary is full */
+ ne = move_to_last(diary, 0);
+ *ne = *e;
+ }
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
+ "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash);
+}
+
+apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
+{
+ apr_array_header_t *npushes = pushes;
+ h2_push_diary_entry e;
+ int i, idx;
+
+ if (session->push_diary && pushes) {
+ npushes = NULL;
+
+ for (i = 0; i < pushes->nelts; ++i) {
+ h2_push *push;
+
+ push = APR_ARRAY_IDX(pushes, i, h2_push*);
+ session->push_diary->dcalc(session->push_diary, &e.hash, push);
+ idx = h2_push_diary_find(session->push_diary, e.hash);
+ if (idx >= 0) {
+ /* Intentional no APLOGNO */
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
+ "push_diary_update: already there PUSH %s", push->req->path);
+ move_to_last(session->push_diary, idx);
+ }
+ else {
+ /* Intentional no APLOGNO */
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
+ "push_diary_update: adding PUSH %s", push->req->path);
+ if (!npushes) {
+ npushes = apr_array_make(pushes->pool, 5, sizeof(h2_push_diary_entry*));
+ }
+ APR_ARRAY_PUSH(npushes, h2_push*) = push;
+ h2_push_diary_append(session->push_diary, &e);
+ }
+ }
+ }
+ return npushes;
+}
+
+apr_array_header_t *h2_push_collect_update(h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_response *res)
+{
+ h2_session *session = stream->session;
+ const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
+ apr_array_header_t *pushes;
+ apr_status_t status;
+
+ if (cache_digest && session->push_diary) {
+ status = h2_push_diary_digest64_set(session->push_diary, req->authority,
+ cache_digest, stream->pool);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ APLOGNO(03057)
+ "h2_session(%ld): push diary set from Cache-Digest: %s",
+ session->id, cache_digest);
+ }
+ }
+ pushes = h2_push_collect(stream->pool, req, res);
+ return h2_push_diary_update(stream->session, pushes);
+}
+
+static apr_int32_t h2_log2inv(unsigned char log2)
+{
+ return log2? (1 << log2) : 1;
+}
+
+
+typedef struct {
+ h2_push_diary *diary;
+ unsigned char log2p;
+ apr_uint32_t mask_bits;
+ apr_uint32_t delta_bits;
+ apr_uint32_t fixed_bits;
+ apr_uint64_t fixed_mask;
+ apr_pool_t *pool;
+ unsigned char *data;
+ apr_size_t datalen;
+ apr_size_t offset;
+ unsigned int bit;
+ apr_uint64_t last;
+} gset_encoder;
+
+static int cmp_puint64(const void *p1, const void *p2)
+{
+ const apr_uint64_t *pu1 = p1, *pu2 = p2;
+ return (*pu1 > *pu2)? 1 : ((*pu1 == *pu2)? 0 : -1);
+}
+
+/* In the Golomb bit stream encoding, bit 0 is the most significant bit of
+ * the first byte; more generally, bit i is addressed as
+ * data[i/8] & cbit_mask[i % 8]
+ */
+static unsigned char cbit_mask[] = {
+ 0x80u,
+ 0x40u,
+ 0x20u,
+ 0x10u,
+ 0x08u,
+ 0x04u,
+ 0x02u,
+ 0x01u,
+};
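+
+/* For illustration: bit 0 is data[0] & 0x80, bit 7 is data[0] & 0x01 and
+ * bit 9 is data[1] & 0x40, i.e. bits are consumed MSB first in each byte. */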
+
+static apr_status_t gset_encode_bit(gset_encoder *encoder, int bit)
+{
+ if (++encoder->bit >= 8) {
+ if (++encoder->offset >= encoder->datalen) {
+ apr_size_t nlen = encoder->datalen*2;
+ unsigned char *ndata = apr_pcalloc(encoder->pool, nlen);
+ if (!ndata) {
+ return APR_ENOMEM;
+ }
+ memcpy(ndata, encoder->data, encoder->datalen);
+ encoder->data = ndata;
+ encoder->datalen = nlen;
+ }
+ encoder->bit = 0;
+ encoder->data[encoder->offset] = 0xffu;
+ }
+ if (!bit) {
+ encoder->data[encoder->offset] &= ~cbit_mask[encoder->bit];
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t gset_encode_next(gset_encoder *encoder, apr_uint64_t pval)
+{
+ apr_uint64_t delta, flex_bits;
+ apr_status_t status = APR_SUCCESS;
+ int i;
+
+ delta = pval - encoder->last;
+ encoder->last = pval;
+ flex_bits = (delta >> encoder->fixed_bits);
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, encoder->pool,
+ "h2_push_diary_enc: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+ APR_UINT64_T_HEX_FMT" flex_bits=%"APR_UINT64_T_FMT", "
+                  "fixed_bits=%d, fixed_val=%"APR_UINT64_T_HEX_FMT,
+ pval, delta, flex_bits, encoder->fixed_bits, delta&encoder->fixed_mask);
+ for (; flex_bits != 0; --flex_bits) {
+ status = gset_encode_bit(encoder, 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ status = gset_encode_bit(encoder, 0);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (i = encoder->fixed_bits-1; i >= 0; --i) {
+ status = gset_encode_bit(encoder, (delta >> i) & 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+ return APR_SUCCESS;
+}
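+
+/* Worked example (illustrative): with fixed_bits = 5 and a delta of 42
+ * (0x2a), flex_bits = 42 >> 5 = 1, so the encoder emits a single '1' bit,
+ * the terminating '0' bit and then the 5 low delta bits 01010 - seven bits
+ * in total for this value. */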
+
+/**
+ * Get a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * from the contents of the push diary.
+ *
+ * @param diary the diary to calculate the digest from
+ * @param pool the pool to use
+ * @param maxP the maximum P parameter of the digest, capping the bits per hash
+ * @param authority the authority to get the data for, use NULL/"*" for all
+ * @param pdata on successful return, the binary cache digest
+ * @param plen on successful return, the length of the binary data
+ */
+apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
+ apr_uint32_t maxP, const char *authority,
+ const char **pdata, apr_size_t *plen)
+{
+ apr_size_t nelts, N, i;
+ unsigned char log2n, log2pmax;
+ gset_encoder encoder;
+ apr_uint64_t *hashes;
+ apr_size_t hash_count;
+
+ nelts = diary->entries->nelts;
+
+ if (nelts > APR_UINT32_MAX) {
+ /* should not happen */
+ return APR_ENOTIMPL;
+ }
+ N = ceil_power_of_2(nelts);
+ log2n = h2_log2(N);
+
+ /* Now log2p is the max number of relevant bits, so that
+     * log2p + log2n == mask_bits. We can use a lower log2p
+ * and have a shorter set encoding...
+ */
+ log2pmax = h2_log2(ceil_power_of_2(maxP));
+
+ memset(&encoder, 0, sizeof(encoder));
+ encoder.diary = diary;
+ encoder.log2p = H2MIN(diary->mask_bits - log2n, log2pmax);
+ encoder.mask_bits = log2n + encoder.log2p;
+ encoder.delta_bits = diary->mask_bits - encoder.mask_bits;
+ encoder.fixed_bits = encoder.log2p;
+ encoder.fixed_mask = 1;
+ encoder.fixed_mask = (encoder.fixed_mask << encoder.fixed_bits) - 1;
+ encoder.pool = pool;
+ encoder.datalen = 512;
+ encoder.data = apr_pcalloc(encoder.pool, encoder.datalen);
+
+ encoder.data[0] = log2n;
+ encoder.data[1] = encoder.log2p;
+ encoder.offset = 1;
+ encoder.bit = 8;
+ encoder.last = 0;
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_get: %d entries, N=%d, log2n=%d, "
+ "mask_bits=%d, enc.mask_bits=%d, delta_bits=%d, enc.log2p=%d, authority=%s",
+ (int)nelts, (int)N, (int)log2n, diary->mask_bits,
+ (int)encoder.mask_bits, (int)encoder.delta_bits,
+ (int)encoder.log2p, authority);
+
+ if (!authority || !diary->authority
+ || !strcmp("*", authority) || !strcmp(diary->authority, authority)) {
+ hash_count = diary->entries->nelts;
+        hashes = apr_pcalloc(encoder.pool, hash_count * sizeof(apr_uint64_t));
+ for (i = 0; i < hash_count; ++i) {
+ hashes[i] = ((&APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry))->hash
+ >> encoder.delta_bits);
+ }
+
+ qsort(hashes, hash_count, sizeof(apr_uint64_t), cmp_puint64);
+ for (i = 0; i < hash_count; ++i) {
+ if (!i || (hashes[i] != hashes[i-1])) {
+ gset_encode_next(&encoder, hashes[i]);
+ }
+ }
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_get: golomb compressed hashes, %d bytes",
+ (int)encoder.offset + 1);
+ }
+ *pdata = (const char *)encoder.data;
+ *plen = encoder.offset + 1;
+
+ return APR_SUCCESS;
+}
+
+typedef struct {
+ h2_push_diary *diary;
+ apr_pool_t *pool;
+ unsigned char log2p;
+ const unsigned char *data;
+ apr_size_t datalen;
+ apr_size_t offset;
+ unsigned int bit;
+ apr_uint64_t last_val;
+} gset_decoder;
+
+static int gset_decode_next_bit(gset_decoder *decoder)
+{
+ if (++decoder->bit >= 8) {
+ if (++decoder->offset >= decoder->datalen) {
+ return -1;
+ }
+ decoder->bit = 0;
+ }
+ return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0;
+}
+
+static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
+{
+ apr_uint64_t flex = 0, fixed = 0, delta;
+ int i;
+
+    /* read 1 bits until we encounter a 0, then read log2p fixed bits.
+     * On a malformed bit-string, this will not fail, but produce results
+     * which are probably too large. Luckily, the diary will modulo the hash.
+ */
+ while (1) {
+ int bit = gset_decode_next_bit(decoder);
+ if (bit == -1) {
+ return APR_EINVAL;
+ }
+ if (!bit) {
+ break;
+ }
+ ++flex;
+ }
+
+ for (i = 0; i < decoder->log2p; ++i) {
+ int bit = gset_decode_next_bit(decoder);
+ if (bit == -1) {
+ return APR_EINVAL;
+ }
+ fixed = (fixed << 1) | bit;
+ }
+
+ delta = (flex << decoder->log2p) | fixed;
+ *phash = delta + decoder->last_val;
+ decoder->last_val = *phash;
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
+ "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+ APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT,
+ *phash, delta, (int)flex, fixed);
+
+ return APR_SUCCESS;
+}
+
+/**
+ * Initialize the push diary by a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ .
+ * @param diary the diary to set the digest into
+ * @param authority the authority to set the data for
+ * @param data the binary cache digest
+ * @param len the length of the cache digest
+ * @return APR_EINVAL if digest was not successfully parsed
+ */
+apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+ const char *data, apr_size_t len)
+{
+ gset_decoder decoder;
+ unsigned char log2n, log2p;
+ apr_size_t N, i;
+ apr_pool_t *pool = diary->entries->pool;
+ h2_push_diary_entry e;
+ apr_status_t status = APR_SUCCESS;
+
+ if (len < 2) {
+ /* at least this should be there */
+ return APR_EINVAL;
+ }
+ log2n = data[0];
+ log2p = data[1];
+ diary->mask_bits = log2n + log2p;
+ if (diary->mask_bits > 64) {
+ /* cannot handle */
+ return APR_ENOTIMPL;
+ }
+
+ /* whatever is in the digest, it replaces the diary entries */
+ apr_array_clear(diary->entries);
+ if (!authority || !strcmp("*", authority)) {
+ diary->authority = NULL;
+ }
+ else if (!diary->authority || strcmp(diary->authority, authority)) {
+ diary->authority = apr_pstrdup(diary->entries->pool, authority);
+ }
+
+ N = h2_log2inv(log2n + log2p);
+
+ decoder.diary = diary;
+ decoder.pool = pool;
+ decoder.log2p = log2p;
+ decoder.data = (const unsigned char*)data;
+ decoder.datalen = len;
+ decoder.offset = 1;
+ decoder.bit = 8;
+ decoder.last_val = 0;
+
+ diary->N = N;
+ /* Determine effective N we use for storage */
+ if (!N) {
+ /* a totally empty cache digest. someone tells us that she has no
+ * entries in the cache at all. Use our own preferences for N+mask
+ */
+ diary->N = diary->NMax;
+ return APR_SUCCESS;
+ }
+ else if (N > diary->NMax) {
+        /* Do not store more than the diary is configured to hold. We would
+         * open ourselves up to DoS attacks otherwise. */
+ diary->N = diary->NMax;
+ }
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_set: N=%d, log2n=%d, "
+ "diary->mask_bits=%d, dec.log2p=%d",
+ (int)diary->N, (int)log2n, diary->mask_bits,
+ (int)decoder.log2p);
+
+ for (i = 0; i < diary->N; ++i) {
+ if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
+ /* the data may have less than N values */
+ break;
+ }
+ h2_push_diary_append(diary, &e);
+ }
+
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
+ (int)diary->entries->nelts, diary->mask_bits);
+ return status;
+}
+
+apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+ const char *data64url, apr_pool_t *pool)
+{
+ const char *data;
+ apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
+ /* Intentional no APLOGNO */
+ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+ "h2_push_diary_digest64_set: digest=%s, dlen=%d",
+ data64url, (int)len);
+ return h2_push_diary_digest_set(diary, authority, data, len);
+}
+
diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h
new file mode 100644
index 00000000..62f5a0a7
--- /dev/null
+++ b/modules/http2/h2_push.h
@@ -0,0 +1,116 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __mod_h2__h2_push__
+#define __mod_h2__h2_push__
+
+#include "h2.h"
+
+struct h2_request;
+struct h2_response;
+struct h2_ngheader;
+struct h2_session;
+struct h2_stream;
+
+typedef struct h2_push {
+ const struct h2_request *req;
+} h2_push;
+
+typedef enum {
+ H2_PUSH_DIGEST_APR_HASH,
+ H2_PUSH_DIGEST_SHA256
+} h2_push_digest_type;
+
+typedef struct h2_push_diary h2_push_diary;
+
+typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
+
+struct h2_push_diary {
+ apr_array_header_t *entries;
+ apr_size_t NMax; /* Maximum for N, should size change be necessary */
+ apr_size_t N; /* Current maximum number of entries, power of 2 */
+ apr_uint64_t mask; /* mask for relevant bits */
+ unsigned int mask_bits; /* number of relevant bits */
+ const char *authority;
+ h2_push_digest_type dtype;
+ h2_push_digest_calc *dcalc;
+};
+
+/**
+ * Determine the list of h2_push'es to send to the client on behalf of
+ * the given request/response pair.
+ *
+ * @param p the pool to use
+ * @param req the request from the client
+ * @param res the response from the server
+ * @return array of h2_push addresses or NULL
+ */
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ const struct h2_response *res);
+
+/**
+ * Create a new push diary for the given maximum number of entries.
+ *
+ * @param p the pool to use
+ * @param N the max number of entries, rounded up to 2^x
+ * @return the created diary, might be NULL if N is 0
+ */
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, apr_size_t N);
+
+/**
+ * Filters the given pushes against the diary and returns only those pushes
+ * that were newly entered in the diary.
+ */
+apr_array_header_t *h2_push_diary_update(struct h2_session *session, apr_array_header_t *pushes);
+
+/**
+ * Collect pushes for the given request/response pair, enter them into the
+ * diary and return those pushes newly entered.
+ */
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_response *res);
+/**
+ * Get a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+ * from the contents of the push diary.
+ *
+ * @param diary the diary to calculate the digest from
+ * @param p the pool to use
+ * @param maxP the maximum P parameter of the digest, capping the bits per hash
+ * @param authority the authority to get the data for, use NULL/"*" for all
+ * @param pdata on successful return, the binary cache digest
+ * @param plen on successful return, the length of the binary data
+ */
+apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p,
+ apr_uint32_t maxP, const char *authority,
+ const char **pdata, apr_size_t *plen);
+
+/**
+ * Initialize the push diary by a cache digest as described in
+ * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ .
+ * @param diary the diary to set the digest into
+ * @param authority the authority to set the data for
+ * @param data the binary cache digest
+ * @param len the length of the cache digest
+ * @return APR_EINVAL if digest was not successfully parsed
+ */
+apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+ const char *data, apr_size_t len);
+
+apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+ const char *data64url, apr_pool_t *pool);
+
+#endif /* defined(__mod_h2__h2_push__) */
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
new file mode 100644
index 00000000..d213e167
--- /dev/null
+++ b/modules/http2/h2_request.c
@@ -0,0 +1,363 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <http_vhost.h>
+#include <util_filter.h>
+#include <ap_mpm.h>
+#include <mod_core.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_util.h"
+
+
+static apr_status_t inspect_clen(h2_request *req, const char *s)
+{
+ char *end;
+ req->content_length = apr_strtoi64(s, &end, 10);
+ return (s == end)? APR_EINVAL : APR_SUCCESS;
+}
+
+apr_status_t h2_request_rwrite(h2_request *req, apr_pool_t *pool,
+ request_rec *r)
+{
+ apr_status_t status;
+ const char *scheme, *authority;
+
+ scheme = apr_pstrdup(pool, r->parsed_uri.scheme? r->parsed_uri.scheme
+ : ap_http_scheme(r));
+ authority = apr_pstrdup(pool, r->hostname);
+ if (!ap_strchr_c(authority, ':') && r->server && r->server->port) {
+ apr_port_t defport = apr_uri_port_of_scheme(scheme);
+ if (defport != r->server->port) {
+ /* port info missing and port is not default for scheme: append */
+ authority = apr_psprintf(pool, "%s:%d", authority,
+ (int)r->server->port);
+ }
+ }
+
+ status = h2_req_make(req, pool, apr_pstrdup(pool, r->method), scheme,
+ authority, apr_uri_unparse(pool, &r->parsed_uri,
+ APR_URI_UNP_OMITSITEPART),
+ r->headers_in);
+ return status;
+}
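+
+/* Example of the port handling above (illustrative): on a server listening
+ * on port 8443 with scheme "https" (default port 443), an authority of
+ * "example.org" becomes "example.org:8443"; on port 443 it stays untouched. */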
+
+apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (nlen <= 0) {
+ return status;
+ }
+
+ if (name[0] == ':') {
+        /* pseudo header, see ch. 8.1.2.3, should always come first */
+ if (!apr_is_empty_table(req->headers)) {
+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool,
+ APLOGNO(02917)
+ "h2_request(%d): pseudo header after request start",
+ req->id);
+ return APR_EGENERAL;
+ }
+
+ if (H2_HEADER_METHOD_LEN == nlen
+ && !strncmp(H2_HEADER_METHOD, name, nlen)) {
+ req->method = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_SCHEME_LEN == nlen
+ && !strncmp(H2_HEADER_SCHEME, name, nlen)) {
+ req->scheme = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_PATH_LEN == nlen
+ && !strncmp(H2_HEADER_PATH, name, nlen)) {
+ req->path = apr_pstrndup(pool, value, vlen);
+ }
+ else if (H2_HEADER_AUTH_LEN == nlen
+ && !strncmp(H2_HEADER_AUTH, name, nlen)) {
+ req->authority = apr_pstrndup(pool, value, vlen);
+ }
+ else {
+ char buffer[32];
+ memset(buffer, 0, 32);
+ strncpy(buffer, name, (nlen > 31)? 31 : nlen);
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, pool,
+ APLOGNO(02954)
+ "h2_request(%d): ignoring unknown pseudo header %s",
+ req->id, buffer);
+ }
+ }
+ else {
+ /* non-pseudo header, append to work bucket of stream */
+ status = h2_headers_add_h1(req->headers, pool, name, nlen, value, vlen);
+ }
+
+ return status;
+}
+
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+ int eos, int push)
+{
+ const char *s;
+
+ if (req->eoh) {
+ /* already done */
+ return APR_SUCCESS;
+ }
+
+ /* rfc7540, ch. 8.1.2.3:
+ * - if we have :authority, it overrides any Host header
+     * - :authority MUST be omitted when converting h1->h2, so we
+ * might get a stream without, but then Host needs to be there */
+ if (!req->authority) {
+ const char *host = apr_table_get(req->headers, "Host");
+ if (!host) {
+ return APR_BADARG;
+ }
+ req->authority = host;
+ }
+ else {
+ apr_table_setn(req->headers, "Host", req->authority);
+ }
+
+ s = apr_table_get(req->headers, "Content-Length");
+ if (s) {
+ if (inspect_clen(req, s) != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, pool,
+ APLOGNO(02959)
+ "h2_request(%d): content-length value not parsed: %s",
+ req->id, s);
+ return APR_EINVAL;
+ }
+ }
+ else {
+ /* no content-length given */
+ req->content_length = -1;
+ if (!eos) {
+ /* We have not seen a content-length and have no eos,
+ * simulate a chunked encoding for our HTTP/1.1 infrastructure,
+ * in case we have "H2SerializeHeaders on" here
+ */
+ req->chunked = 1;
+ apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
+ }
+ else if (apr_table_get(req->headers, "Content-Type")) {
+ /* If we have a content-type, but already see eos, no more
+ * data will come. Signal a zero content length explicitly.
+ */
+ apr_table_setn(req->headers, "Content-Length", "0");
+ }
+ }
+
+ req->eoh = 1;
+ h2_push_policy_determine(req, pool, push);
+
+ /* In the presence of trailers, force behaviour of chunked encoding */
+ s = apr_table_get(req->headers, "Trailer");
+ if (s && s[0]) {
+ req->trailers = apr_table_make(pool, 5);
+ if (!req->chunked) {
+ req->chunked = 1;
+ apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
+ }
+ }
+
+ return APR_SUCCESS;
+}
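+
+/* Example of the fixups above (illustrative): a stream arriving with
+ * ":authority: example.org", no Content-Length and eos not yet seen gets
+ * "Host: example.org" set, content_length = -1 and a merged
+ * "Transfer-Encoding: chunked", so that the HTTP/1.1 processing below the
+ * h2 layer sees a well formed chunked request. */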
+
+static apr_status_t add_h1_trailer(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ char *hname, *hvalue;
+
+ if (h2_req_ignore_trailer(name, nlen)) {
+ return APR_SUCCESS;
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+ hvalue = apr_pstrndup(pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+
+ apr_table_mergen(req->trailers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
+
+
+apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ if (!req->trailers) {
+ ap_log_perror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, pool, APLOGNO(03059)
+                      "h2_request(%d): unannounced trailers",
+ req->id);
+ return APR_EINVAL;
+ }
+ if (nlen == 0 || name[0] == ':') {
+ ap_log_perror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, pool, APLOGNO(03060)
+ "h2_request(%d): pseudo header in trailer",
+ req->id);
+ return APR_EINVAL;
+ }
+ return add_h1_trailer(req, pool, name, nlen, value, vlen);
+}
+
+h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
+{
+ h2_request *dst = apr_pmemdup(p, src, sizeof(*dst));
+ dst->method = apr_pstrdup(p, src->method);
+ dst->scheme = apr_pstrdup(p, src->scheme);
+ dst->authority = apr_pstrdup(p, src->authority);
+ dst->path = apr_pstrdup(p, src->path);
+ dst->headers = apr_table_clone(p, src->headers);
+ if (src->trailers) {
+ dst->trailers = apr_table_clone(p, src->trailers);
+ }
+ return dst;
+}
+
+request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn)
+{
+ request_rec *r;
+ apr_pool_t *p;
+ int access_status = HTTP_OK;
+
+ apr_pool_create(&p, conn->pool);
+ apr_pool_tag(p, "request");
+ r = apr_pcalloc(p, sizeof(request_rec));
+ AP_READ_REQUEST_ENTRY((intptr_t)r, (uintptr_t)conn);
+ r->pool = p;
+ r->connection = conn;
+ r->server = conn->base_server;
+
+ r->user = NULL;
+ r->ap_auth_type = NULL;
+
+ r->allowed_methods = ap_make_method_list(p, 2);
+
+ r->headers_in = apr_table_clone(r->pool, req->headers);
+ r->trailers_in = apr_table_make(r->pool, 5);
+ r->subprocess_env = apr_table_make(r->pool, 25);
+ r->headers_out = apr_table_make(r->pool, 12);
+ r->err_headers_out = apr_table_make(r->pool, 5);
+ r->trailers_out = apr_table_make(r->pool, 5);
+ r->notes = apr_table_make(r->pool, 5);
+
+ r->request_config = ap_create_request_config(r->pool);
+ /* Must be set before we run create request hook */
+
+ r->proto_output_filters = conn->output_filters;
+ r->output_filters = r->proto_output_filters;
+ r->proto_input_filters = conn->input_filters;
+ r->input_filters = r->proto_input_filters;
+ ap_run_create_request(r);
+ r->per_dir_config = r->server->lookup_defaults;
+
+ r->sent_bodyct = 0; /* bytect isn't for body */
+
+ r->read_length = 0;
+ r->read_body = REQUEST_NO_BODY;
+
+ r->status = HTTP_OK; /* Until further notice */
+ r->header_only = 0;
+ r->the_request = NULL;
+
+ /* Begin by presuming any module can make its own path_info assumptions,
+ * until some module interjects and changes the value.
+ */
+ r->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
+
+ r->useragent_addr = conn->client_addr;
+ r->useragent_ip = conn->client_ip;
+
+ ap_run_pre_read_request(r, conn);
+
+ /* Time to populate r with the data we have. */
+ r->request_time = req->request_time;
+ r->method = req->method;
+ /* Provide quick information about the request method as soon as known */
+ r->method_number = ap_method_number_of(r->method);
+ if (r->method_number == M_GET && r->method[0] == 'H') {
+ r->header_only = 1;
+ }
+
+ ap_parse_uri(r, req->path);
+ r->protocol = "HTTP/2.0";
+ r->proto_num = HTTP_VERSION(2, 0);
+
+ r->the_request = apr_psprintf(r->pool, "%s %s %s",
+ r->method, req->path, r->protocol);
+
+ /* update what we think the virtual host is based on the headers we've
+     * now read. May update status.
+     * Leave r->hostname empty, the vhost code will parse it from our Host:
+     * header; otherwise we get complaints about port numbers.
+ */
+ r->hostname = NULL;
+ ap_update_vhost_from_headers(r);
+
+ /* we may have switched to another server */
+ r->per_dir_config = r->server->lookup_defaults;
+
+ /*
+ * Add the HTTP_IN filter here to ensure that ap_discard_request_body
+ * called by ap_die and by ap_send_error_response works correctly on
+ * status codes that do not cause the connection to be dropped and
+ * in situations where the connection should be kept alive.
+ */
+ ap_add_input_filter_handle(ap_http_input_filter_handle,
+ NULL, r, r->connection);
+
+ if (access_status != HTTP_OK
+ || (access_status = ap_run_post_read_request(r))) {
+ /* Request check post hooks failed. An example of this would be a
+ * request for a vhost where h2 is disabled --> 421.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, conn, APLOGNO()
+ "h2_request(%d): access_status=%d, request_create failed",
+ req->id, access_status);
+ ap_die(access_status, r);
+ ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
+ ap_run_log_transaction(r);
+ r = NULL;
+ goto traceout;
+ }
+
+ AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method,
+ (char *)r->uri, (char *)r->server->defn_name,
+ r->status);
+ return r;
+traceout:
+ AP_READ_REQUEST_FAILURE((uintptr_t)r);
+ return r;
+}
+
+
diff --git a/modules/http2/h2_request.h b/modules/http2/h2_request.h
new file mode 100644
index 00000000..ba48f4a1
--- /dev/null
+++ b/modules/http2/h2_request.h
@@ -0,0 +1,48 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_request__
+#define __mod_h2__h2_request__
+
+#include "h2.h"
+
+apr_status_t h2_request_rwrite(h2_request *req, apr_pool_t *pool,
+ request_rec *r);
+
+apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+ int eos, int push);
+
+h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
+
+/**
+ * Create a request_rec representing the h2_request to be
+ * processed on the given connection.
+ *
+ * @param req the h2 request to process
+ * @param conn the connection to process the request on
+ * @return the request_rec representing the request
+ */
+request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn);
+
+
+#endif /* defined(__mod_h2__h2_request__) */
diff --git a/modules/http2/h2_response.c b/modules/http2/h2_response.c
new file mode 100644
index 00000000..4cafd355
--- /dev/null
+++ b/modules/http2/h2_response.c
@@ -0,0 +1,205 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <util_time.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2_private.h"
+#include "h2_filter.h"
+#include "h2_h2.h"
+#include "h2_util.h"
+#include "h2_request.h"
+#include "h2_response.h"
+
+
+static apr_table_t *parse_headers(apr_array_header_t *hlines, apr_pool_t *pool)
+{
+ if (hlines) {
+ apr_table_t *headers = apr_table_make(pool, hlines->nelts);
+ int i;
+
+ for (i = 0; i < hlines->nelts; ++i) {
+ char *hline = ((char **)hlines->elts)[i];
+ char *sep = ap_strchr(hline, ':');
+ if (!sep) {
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, pool,
+ APLOGNO(02955) "h2_response: invalid header[%d] '%s'",
+ i, (char*)hline);
+ /* not valid format, abort */
+ return NULL;
+ }
+ (*sep++) = '\0';
+ while (*sep == ' ' || *sep == '\t') {
+ ++sep;
+ }
+
+ if (!h2_util_ignore_header(hline)) {
+ apr_table_merge(headers, hline, sep);
+ }
+ }
+ return headers;
+ }
+ else {
+ return apr_table_make(pool, 0);
+ }
+}
+
+static const char *get_sos_filter(apr_table_t *notes)
+{
+ return notes? apr_table_get(notes, H2_RESP_SOS_NOTE) : NULL;
+}
+
+static void check_clen(h2_response *response, request_rec *r, apr_pool_t *pool)
+{
+
+ if (r && r->header_only) {
+ response->content_length = 0;
+ }
+ else if (response->headers) {
+ const char *s = apr_table_get(response->headers, "Content-Length");
+ if (s) {
+ char *end;
+ response->content_length = apr_strtoi64(s, &end, 10);
+ if (s == end) {
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL,
+ pool, APLOGNO(02956)
+ "h2_response: content-length"
+ " value not parsed: %s", s);
+ response->content_length = -1;
+ }
+ }
+ }
+}
+
+static h2_response *h2_response_create_int(int stream_id,
+ int rst_error,
+ int http_status,
+ apr_table_t *headers,
+ apr_table_t *notes,
+ apr_pool_t *pool)
+{
+ h2_response *response;
+
+ if (!headers) {
+ return NULL;
+ }
+
+ response = apr_pcalloc(pool, sizeof(h2_response));
+ if (response == NULL) {
+ return NULL;
+ }
+
+ response->stream_id = stream_id;
+ response->rst_error = rst_error;
+ response->http_status = http_status? http_status : 500;
+ response->content_length = -1;
+ response->headers = headers;
+ response->sos_filter = get_sos_filter(notes);
+
+ check_clen(response, NULL, pool);
+ return response;
+}
+
+
+h2_response *h2_response_create(int stream_id,
+ int rst_error,
+ int http_status,
+ apr_array_header_t *hlines,
+ apr_table_t *notes,
+ apr_pool_t *pool)
+{
+ return h2_response_create_int(stream_id, rst_error, http_status,
+ parse_headers(hlines, pool), notes, pool);
+}
+
+h2_response *h2_response_rcreate(int stream_id, request_rec *r,
+ apr_table_t *header, apr_pool_t *pool)
+{
+ h2_response *response = apr_pcalloc(pool, sizeof(h2_response));
+ if (response == NULL) {
+ return NULL;
+ }
+
+ response->stream_id = stream_id;
+ response->http_status = r->status;
+ response->content_length = -1;
+ response->headers = header;
+ response->sos_filter = get_sos_filter(r->notes);
+
+ check_clen(response, r, pool);
+
+ if (response->http_status == HTTP_FORBIDDEN) {
+ const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
+ if (cause) {
+            /* This request triggered a TLS renegotiation that is not allowed
+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, response->http_status, r,
+ APLOGNO(03061)
+ "h2_response(%ld-%d): renegotiate forbidden, cause: %s",
+ (long)r->connection->id, stream_id, cause);
+ response->rst_error = H2_ERR_HTTP_1_1_REQUIRED;
+ }
+ }
+
+ return response;
+}
+
+h2_response *h2_response_die(int stream_id, apr_status_t type,
+ const struct h2_request *req, apr_pool_t *pool)
+{
+ apr_table_t *headers = apr_table_make(pool, 5);
+ char *date = NULL;
+ int status = (type >= 200 && type < 600)? type : 500;
+
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, req->request_time);
+ apr_table_setn(headers, "Date", date);
+ apr_table_setn(headers, "Server", ap_get_server_banner());
+
+ return h2_response_create_int(stream_id, 0, status, headers, NULL, pool);
+}
+
+h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from)
+{
+ h2_response *to = apr_pcalloc(pool, sizeof(h2_response));
+
+ to->stream_id = from->stream_id;
+ to->http_status = from->http_status;
+ to->content_length = from->content_length;
+ to->sos_filter = from->sos_filter;
+ if (from->headers) {
+ to->headers = apr_table_clone(pool, from->headers);
+ }
+ if (from->trailers) {
+ to->trailers = apr_table_clone(pool, from->trailers);
+ }
+ return to;
+}
+
+void h2_response_set_trailers(h2_response *response, apr_table_t *trailers)
+{
+ response->trailers = trailers;
+}
+
diff --git a/modules/http2/h2_response.h b/modules/http2/h2_response.h
new file mode 100644
index 00000000..ca57c532
--- /dev/null
+++ b/modules/http2/h2_response.h
@@ -0,0 +1,73 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_response__
+#define __mod_h2__h2_response__
+
+#include "h2.h"
+
+/**
+ * Create the response from the status and parsed header lines.
+ * @param stream_id id of the stream to create the response for
+ * @param rst_error error for reset or 0
+ * @param http_status http status code of response
+ * @param hlines the text lines of the response header
+ * @param pool the memory pool to use
+ */
+h2_response *h2_response_create(int stream_id,
+ int rst_error,
+ int http_status,
+ apr_array_header_t *hlines,
+ apr_table_t *notes,
+ apr_pool_t *pool);
+
+/**
+ * Create the response from the given request_rec.
+ * @param stream_id id of the stream to create the response for
+ * @param r the request record which was processed
+ * @param header the headers of the response
+ * @param pool the memory pool to use
+ */
+h2_response *h2_response_rcreate(int stream_id, request_rec *r,
+ apr_table_t *header, apr_pool_t *pool);
+
+/**
+ * Create the response for the given error.
+ * @param stream_id id of the stream to create the response for
+ * @param type the error code
+ * @param req the original h2_request
+ * @param pool the memory pool to use
+ */
+h2_response *h2_response_die(int stream_id, apr_status_t type,
+ const struct h2_request *req, apr_pool_t *pool);
+
+/**
+ * Deep copies the response into a new pool.
+ * @param pool the pool to use for the clone
+ * @param from the response to clone
+ * @return the cloned response
+ */
+h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from);
+
+/**
+ * Set the trailers in the response. Will replace any existing trailers. Will
+ * *not* clone the table.
+ *
+ * @param response the response to set the trailers for
+ * @param trailers the trailers to set
+ */
+void h2_response_set_trailers(h2_response *response, apr_table_t *trailers);
+
+#endif /* defined(__mod_h2__h2_response__) */
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
new file mode 100644
index 00000000..598df177
--- /dev/null
+++ b/modules/http2/h2_session.c
@@ -0,0 +1,2353 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <apr_thread_cond.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+
+#include <ap_mpm.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_eoc.h"
+#include "h2_bucket_eos.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_filter.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_response.h"
+#include "h2_stream.h"
+#include "h2_from_h1.h"
+#include "h2_task.h"
+#include "h2_session.h"
+#include "h2_util.h"
+#include "h2_version.h"
+#include "h2_workers.h"
+
+
+static int h2_session_status_from_apr_status(apr_status_t rv)
+{
+ if (rv == APR_SUCCESS) {
+ return NGHTTP2_NO_ERROR;
+ }
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+ else if (APR_STATUS_IS_EOF(rv)) {
+ return NGHTTP2_ERR_EOF;
+ }
+ return NGHTTP2_ERR_PROTO;
+}
+
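+/* Consumed-data callback, registered at the multiplexer: report the bytes
+ * processed for a stream back to nghttp2, which then emits the WINDOW_UPDATEs
+ * we suppressed by disabling automatic window updates. */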
+static void update_window(void *ctx, int stream_id, apr_off_t bytes_read)
+{
+ h2_session *session = (h2_session*)ctx;
+ nghttp2_session_consume(session->ngh2, stream_id, bytes_read);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session(%ld-%d): consumed %ld bytes",
+ session->id, stream_id, (long)bytes_read);
+}
+
+static apr_status_t h2_session_receive(void *ctx,
+ const char *data, apr_size_t len,
+ apr_size_t *readlen);
+
+static int is_accepting_streams(h2_session *session);
+static void dispatch_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg);
+
+apr_status_t h2_session_stream_done(h2_session *session, h2_stream *stream)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): EOS bucket cleanup -> done",
+ session->id, stream->id);
+ h2_ihash_remove(session->streams, stream->id);
+ h2_mplx_stream_done(session->mplx, stream);
+
+ dispatch_event(session, H2_SESSION_EV_STREAM_DONE, 0, NULL);
+ return APR_SUCCESS;
+}
+
+typedef struct stream_sel_ctx {
+ h2_session *session;
+ h2_stream *candidate;
+} stream_sel_ctx;
+
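+/* Iterator: find a stream whose id lies beyond the accepted maximum of a
+ * side that is no longer accepting streams; such streams can be discarded. */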
+static int find_cleanup_stream(void *udata, void *sdata)
+{
+ stream_sel_ctx *ctx = udata;
+ h2_stream *stream = sdata;
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
+ if (!ctx->session->local.accepting
+ && stream->id > ctx->session->local.accepted_max) {
+ ctx->candidate = stream;
+ return 0;
+ }
+ }
+ else {
+ if (!ctx->session->remote.accepting
+ && stream->id > ctx->session->remote.accepted_max) {
+ ctx->candidate = stream;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void cleanup_streams(h2_session *session)
+{
+ stream_sel_ctx ctx;
+ ctx.session = session;
+ ctx.candidate = NULL;
+ while (1) {
+ h2_ihash_iter(session->streams, find_cleanup_stream, &ctx);
+ if (ctx.candidate) {
+ h2_session_stream_done(session, ctx.candidate);
+ ctx.candidate = NULL;
+ }
+ else {
+ break;
+ }
+ }
+}
+
+h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
+ int initiated_on, const h2_request *req)
+{
+ h2_stream * stream;
+ apr_pool_t *stream_pool;
+
+ apr_pool_create(&stream_pool, session->pool);
+ apr_pool_tag(stream_pool, "h2_stream");
+
+ stream = h2_stream_open(stream_id, stream_pool, session,
+ initiated_on, req);
+ nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream);
+ h2_ihash_add(session->streams, stream);
+
+ if (H2_STREAM_CLIENT_INITIATED(stream_id)) {
+ if (stream_id > session->remote.emitted_max) {
+ ++session->remote.emitted_count;
+ session->remote.emitted_max = stream->id;
+ session->local.accepted_max = stream->id;
+ }
+ }
+ else {
+ if (stream_id > session->local.emitted_max) {
+ ++session->local.emitted_count;
+ session->remote.emitted_max = stream->id;
+ }
+ }
+ dispatch_event(session, H2_SESSION_EV_STREAM_OPEN, 0, NULL);
+
+ return stream;
+}
+
+/**
+ * Determine the importance of streams when scheduling tasks.
+ * - if both streams depend on the same one, compare weights
+ * - if one stream is closer to the root, prioritize that one
+ * - if both are on the same level, use the weight of their root
+ * level ancestors
+ */
+static int spri_cmp(int sid1, nghttp2_stream *s1,
+ int sid2, nghttp2_stream *s2, h2_session *session)
+{
+ nghttp2_stream *p1, *p2;
+
+ p1 = nghttp2_stream_get_parent(s1);
+ p2 = nghttp2_stream_get_parent(s2);
+
+ if (p1 == p2) {
+ int32_t w1, w2;
+
+ w1 = nghttp2_stream_get_weight(s1);
+ w2 = nghttp2_stream_get_weight(s2);
+ return w2 - w1;
+ }
+ else if (!p1) {
+ /* stream 1 closer to root */
+ return -1;
+ }
+ else if (!p2) {
+ /* stream 2 closer to root */
+ return 1;
+ }
+ return spri_cmp(sid1, p1, sid2, p2, session);
+}
+
+static int stream_pri_cmp(int sid1, int sid2, void *ctx)
+{
+ h2_session *session = ctx;
+ nghttp2_stream *s1, *s2;
+
+ s1 = nghttp2_session_find_stream(session->ngh2, sid1);
+ s2 = nghttp2_session_find_stream(session->ngh2, sid2);
+
+ if (s1 == s2) {
+ return 0;
+ }
+ else if (!s1) {
+ return 1;
+ }
+ else if (!s2) {
+ return -1;
+ }
+ return spri_cmp(sid1, s1, sid2, s2, session);
+}
+
+static apr_status_t stream_schedule(h2_session *session,
+ h2_stream *stream, int eos)
+{
+ (void)session;
+ return h2_stream_schedule(stream, eos, h2_session_push_enabled(session),
+ stream_pri_cmp, session);
+}
+
+/*
+ * Callback when nghttp2 wants to send bytes back to the client.
+ */
+static ssize_t send_cb(nghttp2_session *ngh2,
+ const uint8_t *data, size_t length,
+ int flags, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ apr_status_t status;
+
+ (void)ngh2;
+ (void)flags;
+ status = h2_conn_io_write(&session->io, (const char *)data, length);
+ if (status == APR_SUCCESS) {
+ return length;
+ }
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03062)
+ "h2_session: send error");
+ return h2_session_status_from_apr_status(status);
+}
+
+static int on_invalid_frame_recv_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ int error, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ (void)ngh2;
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03063)
+ "h2_session(%ld): recv invalid FRAME[%s], frames=%ld/%ld (r/s)",
+ session->id, buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ return 0;
+}
+
+static h2_stream *get_stream(h2_session *session, int stream_id)
+{
+ return nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+}
+
+static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
+ int32_t stream_id,
+ const uint8_t *data, size_t len, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ apr_status_t status = APR_SUCCESS;
+ h2_stream * stream;
+ int rv;
+
+ (void)flags;
+ if (!is_accepting_streams(session)) {
+ /* ignore */
+ return 0;
+ }
+
+ stream = get_stream(session, stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064)
+ "h2_stream(%ld-%d): on_data_chunk for unknown stream",
+ session->id, (int)stream_id);
+ rv = nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id,
+ NGHTTP2_INTERNAL_ERROR);
+ if (nghttp2_is_fatal(rv)) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ return 0;
+ }
+
+    /* FIXME: setting EOS this way seems to break input handling
+     * in mod_proxy_http2. Why? */
+ status = h2_stream_write_data(stream, (const char *)data, len,
+ 0 /*flags & NGHTTP2_FLAG_END_STREAM*/);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_stream(%ld-%d): data_chunk_recv, written %ld bytes",
+ session->id, stream_id, (long)len);
+ if (status != APR_SUCCESS) {
+ update_window(session, stream_id, len);
+ rv = nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id,
+ H2_STREAM_RST(stream, H2_ERR_INTERNAL_ERROR));
+ if (nghttp2_is_fatal(rv)) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+ return 0;
+}
+
+static apr_status_t stream_release(h2_session *session,
+ h2_stream *stream,
+ uint32_t error_code)
+{
+ conn_rec *c = session->c;
+ apr_bucket *b;
+ apr_status_t status;
+
+ if (!error_code) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_stream(%ld-%d): handled, closing",
+ session->id, (int)stream->id);
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
+ if (stream->id > session->local.completed_max) {
+ session->local.completed_max = stream->id;
+ }
+ }
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03065)
+ "h2_stream(%ld-%d): closing with err=%d %s",
+ session->id, (int)stream->id, (int)error_code,
+ h2_h2_err_description(error_code));
+ h2_stream_rst(stream, error_code);
+ }
+
+ b = h2_bucket_eos_create(c->bucket_alloc, stream);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ status = h2_conn_io_pass(&session->io, session->bbtmp);
+ apr_brigade_cleanup(session->bbtmp);
+ return status;
+}
+
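+/* nghttp2 callback: a stream has been closed. Reset it if an error code
+ * was given, then pass an EOS bucket down the connection to trigger the
+ * final stream cleanup. */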
+static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id,
+ uint32_t error_code, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream *stream;
+
+ (void)ngh2;
+ stream = get_stream(session, stream_id);
+ if (stream) {
+ stream_release(session, stream, error_code);
+ }
+ return 0;
+}
+
+static int on_begin_headers_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame, void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream *s;
+
+    /* We may see HEADERS at the start of a stream or, after all
+     * its DATA frames, carrying trailers. */
+ (void)ngh2;
+ s = get_stream(session, frame->hd.stream_id);
+ if (s) {
+ /* nop */
+ }
+ else {
+ s = h2_session_open_stream(userp, frame->hd.stream_id, 0, NULL);
+ }
+ return s? 0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
+}
+
+static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags,
+ void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ h2_stream * stream;
+ apr_status_t status;
+
+ (void)flags;
+ if (!is_accepting_streams(session)) {
+ /* just ignore */
+ return 0;
+ }
+
+ stream = get_stream(session, frame->hd.stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
+ APLOGNO(02920)
+ "h2_session: stream(%ld-%d): on_header unknown stream",
+ session->id, (int)frame->hd.stream_id);
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+
+ status = h2_stream_add_header(stream, (const char *)name, namelen,
+ (const char *)value, valuelen);
+ if (status != APR_SUCCESS && !stream->response) {
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+ return 0;
+}
+
+/**
+ * The nghttp2 session has received a complete frame. Most frames it
+ * processes for its internal state. HEADERS and DATA frames, however,
+ * we need to handle ourselves.
+ */
+static int on_frame_recv_cb(nghttp2_session *ng2s,
+ const nghttp2_frame *frame,
+ void *userp)
+{
+ h2_session *session = (h2_session *)userp;
+ apr_status_t status = APR_SUCCESS;
+ h2_stream *stream;
+
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03066)
+ "h2_session(%ld): recv FRAME[%s], frames=%ld/%ld (r/s)",
+ session->id, buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+
+ ++session->frames_received;
+ switch (frame->hd.type) {
+ case NGHTTP2_HEADERS:
+ /* This can be HEADERS for a new stream, defining the request,
+ * or HEADER may come after DATA at the end of a stream as in
+ * trailers */
+ stream = get_stream(session, frame->hd.stream_id);
+ if (stream) {
+ int eos = (frame->hd.flags & NGHTTP2_FLAG_END_STREAM);
+
+ if (h2_stream_is_scheduled(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_stream(%ld-%d): TRAILER, eos=%d",
+ session->id, frame->hd.stream_id, eos);
+ if (eos) {
+ status = h2_stream_close_input(stream);
+ }
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_stream(%ld-%d): HEADER, eos=%d",
+ session->id, frame->hd.stream_id, eos);
+ status = stream_schedule(session, stream, eos);
+ }
+ }
+ else {
+ status = APR_EINVAL;
+ }
+ break;
+ case NGHTTP2_DATA:
+ stream = get_stream(session, frame->hd.stream_id);
+ if (stream) {
+ int eos = (frame->hd.flags & NGHTTP2_FLAG_END_STREAM);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_stream(%ld-%d): DATA, len=%ld, eos=%d",
+ session->id, frame->hd.stream_id,
+ (long)frame->hd.length, eos);
+ if (eos) {
+ status = h2_stream_close_input(stream);
+ }
+ }
+ else {
+ status = APR_EINVAL;
+ }
+ break;
+ case NGHTTP2_PRIORITY:
+ session->reprioritize = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session: stream(%ld-%d): PRIORITY frame "
+ " weight=%d, dependsOn=%d, exclusive=%d",
+ session->id, (int)frame->hd.stream_id,
+ frame->priority.pri_spec.weight,
+ frame->priority.pri_spec.stream_id,
+ frame->priority.pri_spec.exclusive);
+ break;
+ case NGHTTP2_WINDOW_UPDATE:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session: stream(%ld-%d): WINDOW_UPDATE "
+ "incr=%d",
+ session->id, (int)frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ break;
+ case NGHTTP2_RST_STREAM:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
+                          "h2_session(%ld-%d): RST_STREAM by client, error=%d",
+ session->id, (int)frame->hd.stream_id,
+ (int)frame->rst_stream.error_code);
+ stream = get_stream(session, frame->hd.stream_id);
+ if (stream && stream->request && stream->request->initiated_on) {
+ ++session->pushes_reset;
+ }
+ else {
+ ++session->streams_reset;
+ }
+ break;
+ case NGHTTP2_GOAWAY:
+ session->remote.accepted_max = frame->goaway.last_stream_id;
+ session->remote.error = frame->goaway.error_code;
+ dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY, 0, NULL);
+ break;
+ default:
+ if (APLOGctrace2(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer,
+ sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session: on_frame_rcv %s", buffer);
+ }
+ break;
+ }
+
+ if (status != APR_SUCCESS) {
+ int rv;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ APLOGNO(02923)
+ "h2_session: stream(%ld-%d): error handling frame",
+ session->id, (int)frame->hd.stream_id);
+ rv = nghttp2_submit_rst_stream(ng2s, NGHTTP2_FLAG_NONE,
+ frame->hd.stream_id,
+ NGHTTP2_INTERNAL_ERROR);
+ if (nghttp2_is_fatal(rv)) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+
+ return 0;
+}
+
+static char immortal_zeros[H2_MAX_PADLEN];
+
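+/* nghttp2 callback for DATA frames we marked NGHTTP2_DATA_FLAG_NO_COPY:
+ * write the 9 byte frame header (and pad length, if any) ourselves, then
+ * read the stream data into a brigade, append the padding and pass it
+ * all to the connection. */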
+static int on_send_data_cb(nghttp2_session *ngh2,
+ nghttp2_frame *frame,
+ const uint8_t *framehd,
+ size_t length,
+ nghttp2_data_source *source,
+ void *userp)
+{
+ apr_status_t status = APR_SUCCESS;
+ h2_session *session = (h2_session *)userp;
+ int stream_id = (int)frame->hd.stream_id;
+ unsigned char padlen;
+ int eos;
+ h2_stream *stream;
+ apr_bucket *b;
+ apr_off_t len = length;
+
+ (void)ngh2;
+ (void)source;
+ if (frame->data.padlen > H2_MAX_PADLEN) {
+ return NGHTTP2_ERR_PROTO;
+ }
+ padlen = (unsigned char)frame->data.padlen;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): send_data_cb for %ld bytes",
+ session->id, (int)stream_id, (long)length);
+
+ stream = get_stream(session, stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c,
+ APLOGNO(02924)
+ "h2_stream(%ld-%d): send_data, lookup stream",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
+ if (padlen && status == APR_SUCCESS) {
+ status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
+ }
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_stream(%ld-%d): writing frame header",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ status = h2_stream_read_to(stream, session->bbtmp, &len, &eos);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_stream(%ld-%d): send_data_cb, reading stream",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ else if (len != length) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_stream(%ld-%d): send_data_cb, wanted %ld bytes, "
+ "got %ld from stream",
+ session->id, (int)stream_id, (long)length, (long)len);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (padlen) {
+ b = apr_bucket_immortal_create(immortal_zeros, padlen,
+ session->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ }
+
+ status = h2_conn_io_pass(&session->io, session->bbtmp);
+
+ apr_brigade_cleanup(session->bbtmp);
+ if (status == APR_SUCCESS) {
+ stream->data_frames_sent++;
+ return 0;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ APLOGNO(02925)
+ "h2_stream(%ld-%d): failed send_data_cb",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+}
+
+static int on_frame_send_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ void *user_data)
+{
+ h2_session *session = user_data;
+ if (APLOGcdebug(session->c)) {
+ char buffer[256];
+
+ h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03068)
+ "h2_session(%ld): sent FRAME[%s], frames=%ld/%ld (r/s)",
+ session->id, buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ ++session->frames_sent;
+ return 0;
+}
+
+#define NGH2_SET_CALLBACK(callbacks, name, fn)\
+nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
+
+static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
+{
+ int rv = nghttp2_session_callbacks_new(pcb);
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c,
+ APLOGNO(02926) "nghttp2_session_callbacks_new: %s",
+ nghttp2_strerror(rv));
+ return APR_EGENERAL;
+ }
+
+ NGH2_SET_CALLBACK(*pcb, send, send_cb);
+ NGH2_SET_CALLBACK(*pcb, on_frame_recv, on_frame_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_invalid_frame_recv, on_invalid_frame_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_data_chunk_recv, on_data_chunk_recv_cb);
+ NGH2_SET_CALLBACK(*pcb, on_stream_close, on_stream_close_cb);
+ NGH2_SET_CALLBACK(*pcb, on_begin_headers, on_begin_headers_cb);
+ NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb);
+ NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb);
+ NGH2_SET_CALLBACK(*pcb, on_frame_send, on_frame_send_cb);
+
+ return APR_SUCCESS;
+}
+
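+/* Free all session resources: drop remaining streams, release and join the
+ * multiplexer, remove the H2_IN input filter, delete the nghttp2 session
+ * and destroy the session pool. */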
+static void h2_session_destroy(h2_session *session)
+{
+ AP_DEBUG_ASSERT(session);
+
+ h2_ihash_clear(session->streams);
+ if (session->mplx) {
+ h2_mplx_set_consumed_cb(session->mplx, NULL, NULL);
+ h2_mplx_release_and_join(session->mplx, session->iowait);
+ session->mplx = NULL;
+ }
+
+ ap_remove_input_filter_byhandle((session->r? session->r->input_filters :
+ session->c->input_filters), "H2_IN");
+ if (session->ngh2) {
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ }
+ if (session->c) {
+ h2_ctx_clear(session->c);
+ }
+
+ if (APLOGctrace1(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_session(%ld): destroy", session->id);
+ }
+ if (session->pool) {
+ apr_pool_destroy(session->pool);
+ }
+}
+
+static apr_status_t h2_session_shutdown(h2_session *session, int error,
+ const char *msg, int force_close)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ AP_DEBUG_ASSERT(session);
+ if (!msg && error) {
+ msg = nghttp2_strerror(error);
+ }
+
+ if (error || force_close) {
+ /* not a graceful shutdown, we want to leave...
+ * Do not start further streams that are waiting to be scheduled.
+         * Find out the max stream id that we have processed or
+         * are still actively working on.
+ * Remove all streams greater than this number without submitting
+ * a RST_STREAM frame, since that should be clear from the GOAWAY
+ * we send. */
+ session->local.accepted_max = h2_mplx_shutdown(session->mplx);
+ session->local.error = error;
+ }
+ else {
+        /* Graceful shutdown: we continue processing all streams
+         * we have, but no longer accept new ones. Report the max stream
+         * id we have received and discard any newer ones. */
+ }
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
+ session->local.accepted_max,
+ error, (uint8_t*)msg, msg? strlen(msg):0);
+ status = nghttp2_session_send(session->ngh2);
+ if (status == APR_SUCCESS) {
+ status = h2_conn_io_flush(&session->io);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03069)
+ "session(%ld): sent GOAWAY, err=%d, msg=%s",
+ session->id, error, msg? msg : "");
+ dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg);
+
+ if (force_close) {
+ h2_mplx_abort(session->mplx);
+ }
+
+ return status;
+}
+
+static apr_status_t session_pool_cleanup(void *data)
+{
+ h2_session *session = data;
+    /* On a controlled connection shutdown, this never gets
+     * called, as we deregister and destroy our pool manually.
+     * However, when we have an async mpm and have handed it our idle
+     * connection, it will simply clean up once the connection is closed
+     * from the other side (and sometimes even from our side), and
+     * that is when we end up here.
+     */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "session(%ld): pool_cleanup", session->id);
+
+ if (session->state != H2_SESSION_ST_DONE
+ && session->state != H2_SESSION_ST_LOCAL_SHUTDOWN) {
+ /* Not good. The connection is being torn down and we have
+ * not sent a goaway. This is considered a protocol error and
+ * the client has to assume that any streams "in flight" may have
+ * been processed and are not safe to retry.
+         * As clients with idle connections may only learn about a closed
+ * connection when sending the next request, this has the effect
+ * that at least this one request will fail.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03199)
+ "session(%ld): connection disappeared without proper "
+ "goodbye, clients will be confused, should not happen",
+ session->id);
+ }
+ /* keep us from destroying the pool, since that is already ongoing. */
+ session->pool = NULL;
+ h2_session_destroy(session);
+ return APR_SUCCESS;
+}
+
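+/* Memory hooks handed to nghttp2 when TRACE6 logging is enabled, so that
+ * every allocation the library makes shows up in the log. */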
+static void *session_malloc(size_t size, void *ctx)
+{
+ h2_session *session = ctx;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
+ "h2_session(%ld): malloc(%ld)",
+ session->id, (long)size);
+ return malloc(size);
+}
+
+static void session_free(void *p, void *ctx)
+{
+ h2_session *session = ctx;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
+ "h2_session(%ld): free()",
+ session->id);
+ free(p);
+}
+
+static void *session_calloc(size_t n, size_t size, void *ctx)
+{
+ h2_session *session = ctx;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
+ "h2_session(%ld): calloc(%ld, %ld)",
+ session->id, (long)n, (long)size);
+ return calloc(n, size);
+}
+
+static void *session_realloc(void *p, size_t size, void *ctx)
+{
+ h2_session *session = ctx;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
+ "h2_session(%ld): realloc(%ld)",
+ session->id, (long)size);
+ return realloc(p, size);
+}
+
+static h2_session *h2_session_create_int(conn_rec *c,
+ request_rec *r,
+ h2_ctx *ctx,
+ h2_workers *workers)
+{
+ nghttp2_session_callbacks *callbacks = NULL;
+ nghttp2_option *options = NULL;
+ uint32_t n;
+
+ apr_pool_t *pool = NULL;
+ apr_status_t status = apr_pool_create(&pool, c->pool);
+ h2_session *session;
+ if (status != APR_SUCCESS) {
+ return NULL;
+ }
+ apr_pool_tag(pool, "h2_session");
+
+ session = apr_pcalloc(pool, sizeof(h2_session));
+ if (session) {
+ int rv;
+ nghttp2_mem *mem;
+
+ session->id = c->id;
+ session->c = c;
+ session->r = r;
+ session->s = h2_ctx_server_get(ctx);
+ session->pool = pool;
+ session->config = h2_config_sget(session->s);
+ session->workers = workers;
+
+ session->state = H2_SESSION_ST_INIT;
+ session->local.accepting = 1;
+ session->remote.accepting = 1;
+
+ apr_pool_pre_cleanup_register(pool, session, session_pool_cleanup);
+
+ session->max_stream_count = h2_config_geti(session->config,
+ H2_CONF_MAX_STREAMS);
+ session->max_stream_mem = h2_config_geti(session->config,
+ H2_CONF_STREAM_MAX_MEM);
+
+ status = apr_thread_cond_create(&session->iowait, session->pool);
+ if (status != APR_SUCCESS) {
+ return NULL;
+ }
+
+ session->streams = h2_ihash_create(session->pool,
+ offsetof(h2_stream, id));
+ session->mplx = h2_mplx_create(c, session->pool, session->config,
+ session->s->timeout, workers);
+
+ h2_mplx_set_consumed_cb(session->mplx, update_window, session);
+
+ /* Install the connection input filter that feeds the session */
+ session->cin = h2_filter_cin_create(session->pool,
+ h2_session_receive, session);
+ ap_add_input_filter("H2_IN", session->cin, r, c);
+
+ h2_conn_io_init(&session->io, c, session->config);
+ session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
+
+ status = init_callbacks(c, &callbacks);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927)
+ "nghttp2: error in init_callbacks");
+ h2_session_destroy(session);
+ return NULL;
+ }
+
+ rv = nghttp2_option_new(&options);
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(02928) "nghttp2_option_new: %s",
+ nghttp2_strerror(rv));
+ h2_session_destroy(session);
+ return NULL;
+ }
+ nghttp2_option_set_peer_max_concurrent_streams(
+ options, (uint32_t)session->max_stream_count);
+        /* We need to handle window updates ourselves, otherwise we
+ * get flooded by nghttp2. */
+ nghttp2_option_set_no_auto_window_update(options, 1);
+
+ if (APLOGctrace6(c)) {
+ mem = apr_pcalloc(session->pool, sizeof(nghttp2_mem));
+ mem->mem_user_data = session;
+ mem->malloc = session_malloc;
+ mem->free = session_free;
+ mem->calloc = session_calloc;
+ mem->realloc = session_realloc;
+
+ rv = nghttp2_session_server_new3(&session->ngh2, callbacks,
+ session, options, mem);
+ }
+ else {
+ rv = nghttp2_session_server_new2(&session->ngh2, callbacks,
+ session, options);
+ }
+ nghttp2_session_callbacks_del(callbacks);
+ nghttp2_option_del(options);
+
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(02929) "nghttp2_session_server_new: %s",
+ nghttp2_strerror(rv));
+ h2_session_destroy(session);
+ return NULL;
+ }
+
+ n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
+ session->push_diary = h2_push_diary_create(session->pool, n);
+
+ if (APLOGcdebug(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03200)
+ "h2_session(%ld) created, max_streams=%d, "
+ "stream_mem=%d, workers_limit=%d, workers_max=%d, "
+ "push_diary(type=%d,N=%d)",
+ session->id, (int)session->max_stream_count,
+ (int)session->max_stream_mem,
+ session->mplx->workers_limit,
+ session->mplx->workers_max,
+ session->push_diary->dtype,
+ (int)session->push_diary->N);
+ }
+ }
+ return session;
+}
+
+h2_session *h2_session_create(conn_rec *c, h2_ctx *ctx, h2_workers *workers)
+{
+ return h2_session_create_int(c, NULL, ctx, workers);
+}
+
+h2_session *h2_session_rcreate(request_rec *r, h2_ctx *ctx, h2_workers *workers)
+{
+ return h2_session_create_int(r->connection, r, ctx, workers);
+}
+
+void h2_session_eoc_callback(h2_session *session)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "session(%ld): cleanup and destroy", session->id);
+ apr_pool_cleanup_kill(session->pool, session, session_pool_cleanup);
+ h2_session_destroy(session);
+}
+
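+/* Start the HTTP/2 conversation: for 'h2c' upgrades, apply the client's
+ * HTTP2-Settings header and auto-open stream 1 for the upgraded request;
+ * then submit our SETTINGS frame and a connection WINDOW_UPDATE. */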
+static apr_status_t h2_session_start(h2_session *session, int *rv)
+{
+ apr_status_t status = APR_SUCCESS;
+ nghttp2_settings_entry settings[3];
+ size_t slen;
+ int win_size;
+
+ AP_DEBUG_ASSERT(session);
+ /* Start the conversation by submitting our SETTINGS frame */
+ *rv = 0;
+ if (session->r) {
+ const char *s, *cs;
+ apr_size_t dlen;
+ h2_stream * stream;
+
+ /* 'h2c' mode: we should have a 'HTTP2-Settings' header with
+ * base64 encoded client settings. */
+ s = apr_table_get(session->r->headers_in, "HTTP2-Settings");
+ if (!s) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EINVAL, session->r,
+ APLOGNO(02931)
+ "HTTP2-Settings header missing in request");
+ return APR_EINVAL;
+ }
+ cs = NULL;
+ dlen = h2_util_base64url_decode(&cs, s, session->pool);
+
+ if (APLOGrdebug(session->r)) {
+ char buffer[128];
+ h2_util_hex_dump(buffer, 128, (char*)cs, dlen);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, session->r, APLOGNO(03070)
+ "upgrading h2c session with HTTP2-Settings: %s -> %s (%d)",
+ s, buffer, (int)dlen);
+ }
+
+ *rv = nghttp2_session_upgrade(session->ngh2, (uint8_t*)cs, dlen, NULL);
+ if (*rv != 0) {
+ status = APR_EINVAL;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
+ APLOGNO(02932) "nghttp2_session_upgrade: %s",
+ nghttp2_strerror(*rv));
+ return status;
+ }
+
+ /* Now we need to auto-open stream 1 for the request we got. */
+ stream = h2_session_open_stream(session, 1, 0, NULL);
+ if (!stream) {
+ status = APR_EGENERAL;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
+ APLOGNO(02933) "open stream 1: %s",
+ nghttp2_strerror(*rv));
+ return status;
+ }
+
+ status = h2_stream_set_request(stream, session->r);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ status = stream_schedule(session, stream, 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ slen = 0;
+ settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
+ settings[slen].value = (uint32_t)session->max_stream_count;
+ ++slen;
+ win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE);
+ if (win_size != H2_INITIAL_WINDOW_SIZE) {
+ settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings[slen].value = win_size;
+ ++slen;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03201)
+ "h2_session(%ld): start, INITIAL_WINDOW_SIZE=%ld, "
+ "MAX_CONCURRENT_STREAMS=%d",
+ session->id, (long)win_size, (int)session->max_stream_count);
+ *rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE,
+ settings, slen);
+ if (*rv != 0) {
+ status = APR_EGENERAL;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ APLOGNO(02935) "nghttp2_submit_settings: %s",
+ nghttp2_strerror(*rv));
+ }
+ else {
+        /* Use the maximum possible value for the connection window size. We are
+         * only interested in per-stream flow control, which has the initial
+         * window size configured above.
+ * Therefore, for our use, the connection window can only get in the
+ * way. Example: if we allow 100 streams with a 32KB window each, we
+ * buffer up to 3.2 MB of data. Unless we do separate connection window
+ * interim updates, any smaller connection window will lead to blocking
+ * in DATA flow.
+ */
+ *rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE,
+ 0, NGHTTP2_MAX_WINDOW_SIZE - win_size);
+ if (*rv != 0) {
+ status = APR_EGENERAL;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ APLOGNO(02970) "nghttp2_submit_window_update: %s",
+ nghttp2_strerror(*rv));
+ }
+ }
+
+ return status;
+}
+
+static ssize_t stream_data_cb(nghttp2_session *ng2s,
+ int32_t stream_id,
+ uint8_t *buf,
+ size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source,
+ void *puser)
+{
+ h2_session *session = (h2_session *)puser;
+ apr_off_t nread = length;
+ int eos = 0;
+ apr_status_t status;
+ h2_stream *stream;
+ AP_DEBUG_ASSERT(session);
+
+ /* The session wants to send more DATA for the stream. We need
+ * to find out how much of the requested length we can send without
+ * blocking.
+ * Indicate EOS when we encounter it or DEFERRED if the stream
+ * should be suspended. Beware of trailers.
+ */
+
+ (void)ng2s;
+ (void)buf;
+ (void)source;
+ stream = get_stream(session, stream_id);
+ if (!stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
+ APLOGNO(02937)
+ "h2_stream(%ld-%d): data requested but stream not found",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ AP_DEBUG_ASSERT(!h2_stream_is_suspended(stream));
+
+ status = h2_stream_out_prepare(stream, &nread, &eos);
+ if (nread) {
+ *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
+ }
+
+ switch (status) {
+ case APR_SUCCESS:
+ break;
+
+ case APR_ECONNRESET:
+ return nghttp2_submit_rst_stream(ng2s, NGHTTP2_FLAG_NONE,
+ stream->id, stream->rst_error);
+
+ case APR_EAGAIN:
+            /* If there is no data available, nghttp2 will automatically
+             * suspend this stream and not ask for more data until we resume
+             * it. Remember at our multiplexer that this stream is suspended.
+             */
+ nread = 0;
+ h2_mplx_suspend_stream(session->mplx, stream->id);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03071)
+ "h2_stream(%ld-%d): suspending",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_DEFERRED;
+
+ default:
+ nread = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ APLOGNO(02938) "h2_stream(%ld-%d): reading data",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if (eos) {
+ apr_table_t *trailers = h2_stream_get_trailers(stream);
+ if (trailers && !apr_is_empty_table(trailers)) {
+ h2_ngheader *nh;
+ int rv;
+
+ nh = h2_util_ngheader_make(stream->pool, trailers);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03072)
+ "h2_stream(%ld-%d): submit %d trailers",
+ session->id, (int)stream_id,(int) nh->nvlen);
+ rv = nghttp2_submit_trailer(ng2s, stream->id, nh->nv, nh->nvlen);
+ if (rv < 0) {
+ nread = rv;
+ }
+ *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM;
+ }
+
+ *data_flags |= NGHTTP2_DATA_FLAG_EOF;
+ }
+
+ return (ssize_t)nread;
+}
+
+typedef struct {
+ nghttp2_nv *nv;
+ size_t nvlen;
+ size_t offset;
+} nvctx_t;
+
+struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
+ h2_push *push)
+{
+ apr_status_t status;
+ h2_stream *stream;
+ h2_ngheader *ngh;
+ int nid;
+
+ ngh = h2_util_ngheader_make_req(is->pool, push->req);
+ nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id,
+ ngh->nv, ngh->nvlen, NULL);
+ if (nid <= 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03075)
+ "h2_stream(%ld-%d): submitting push promise fail: %s",
+ session->id, is->id, nghttp2_strerror(nid));
+ return NULL;
+ }
+ ++session->pushes_promised;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03076)
+ "h2_stream(%ld-%d): SERVER_PUSH %d for %s %s on %d",
+ session->id, is->id, nid,
+ push->req->method, push->req->path, is->id);
+
+ stream = h2_session_open_stream(session, nid, is->id, push->req);
+ if (stream) {
+ status = stream_schedule(session, stream, 1);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ "h2_stream(%ld-%d): scheduling push stream",
+ session->id, stream->id);
+ stream = NULL;
+ }
+ ++session->unsent_promises;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03077)
+ "h2_stream(%ld-%d): failed to create stream obj %d",
+ session->id, is->id, nid);
+ }
+
+ if (!stream) {
+ /* try to tell the client that it should not wait. */
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid,
+ NGHTTP2_INTERNAL_ERROR);
+ }
+
+ return stream;
+}
+
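+/* Clamp a computed weight into nghttp2's valid range
+ * [NGHTTP2_MIN_WEIGHT, NGHTTP2_MAX_WEIGHT]. */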
+static int valid_weight(float f)
+{
+ int w = (int)f;
+ return (w < NGHTTP2_MIN_WEIGHT? NGHTTP2_MIN_WEIGHT :
+ (w > NGHTTP2_MAX_WEIGHT)? NGHTTP2_MAX_WEIGHT : w);
+}
+
+apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
+ const h2_priority *prio)
+{
+ apr_status_t status = APR_SUCCESS;
+#ifdef H2_NG2_CHANGE_PRIO
+ nghttp2_stream *s_grandpa, *s_parent, *s;
+
+ s = nghttp2_session_find_stream(session->ngh2, stream->id);
+ if (!s) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_stream(%ld-%d): lookup of nghttp2_stream failed",
+ session->id, stream->id);
+ return APR_EINVAL;
+ }
+
+ s_parent = nghttp2_stream_get_parent(s);
+ if (s_parent) {
+ nghttp2_priority_spec ps;
+ int id_parent, id_grandpa, w_parent, w, rv = 0;
+ char *ptype = "AFTER";
+ h2_dependency dep = prio->dependency;
+
+ id_parent = nghttp2_stream_get_stream_id(s_parent);
+ s_grandpa = nghttp2_stream_get_parent(s_parent);
+ if (s_grandpa) {
+ id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
+ }
+ else {
+ /* parent of parent does not exist,
+ * only possible if parent == root */
+ dep = H2_DEPENDANT_AFTER;
+ }
+
+ switch (dep) {
+ case H2_DEPENDANT_INTERLEAVED:
+ /* PUSHed stream is to be interleaved with initiating stream.
+ * It is made a sibling of the initiating stream and gets a
+             * proportional weight [1, MAX_WEIGHT] of the initiating
+ * stream weight.
+ */
+ ptype = "INTERLEAVED";
+ w_parent = nghttp2_stream_get_weight(s_parent);
+ w = valid_weight(w_parent * ((float)prio->weight / NGHTTP2_MAX_WEIGHT));
+ nghttp2_priority_spec_init(&ps, id_grandpa, w, 0);
+ break;
+
+ case H2_DEPENDANT_BEFORE:
+            /* PUSHed stream is to be sent BEFORE the initiating stream.
+ * It gets the same weight as the initiating stream, replaces
+ * that stream in the dependency tree and has the initiating
+ * stream as child.
+ */
+ ptype = "BEFORE";
+ w = w_parent = nghttp2_stream_get_weight(s_parent);
+ nghttp2_priority_spec_init(&ps, stream->id, w_parent, 0);
+ id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
+ rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps);
+ if (rv < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03202)
+ "h2_stream(%ld-%d): PUSH BEFORE, weight=%d, "
+ "depends=%d, returned=%d",
+ session->id, id_parent, ps.weight, ps.stream_id, rv);
+ return APR_EGENERAL;
+ }
+ nghttp2_priority_spec_init(&ps, id_grandpa, w, 0);
+ break;
+
+ case H2_DEPENDANT_AFTER:
+ /* The PUSHed stream is to be sent after the initiating stream.
+             * Give it the specified weight and let it depend on the initiating
+             * stream.
+ */
+ /* fall through, it's the default */
+ default:
+ nghttp2_priority_spec_init(&ps, id_parent, valid_weight(prio->weight), 0);
+ break;
+ }
+
+
+ rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03203)
+ "h2_stream(%ld-%d): PUSH %s, weight=%d, "
+ "depends=%d, returned=%d",
+ session->id, stream->id, ptype,
+ ps.weight, ps.stream_id, rv);
+ status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
+ }
+#else
+ (void)session;
+ (void)stream;
+ (void)prio;
+ (void)valid_weight;
+#endif
+ return status;
+}
+
+int h2_session_push_enabled(h2_session *session)
+{
+ /* iff we can and they can and want */
+    return (session->remote.accepting /* no remote GOAWAY received yet */
+ && h2_config_geti(session->config, H2_CONF_PUSH)
+ && nghttp2_session_get_remote_settings(session->ngh2,
+ NGHTTP2_SETTINGS_ENABLE_PUSH));
+}
+
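+/* Let nghttp2 serialize all pending frames onto the connection, applying
+ * the server timeout to the socket while sending, and reset the counters
+ * of unsent promises and submits. */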
+static apr_status_t h2_session_send(h2_session *session)
+{
+ apr_interval_time_t saved_timeout;
+ int rv;
+ apr_socket_t *socket;
+
+ socket = ap_get_conn_socket(session->c);
+ if (socket) {
+ apr_socket_timeout_get(socket, &saved_timeout);
+ apr_socket_timeout_set(socket, session->s->timeout);
+ }
+
+ rv = nghttp2_session_send(session->ngh2);
+
+ if (socket) {
+ apr_socket_timeout_set(socket, saved_timeout);
+ }
+ session->have_written = 1;
+ if (rv != 0) {
+ if (nghttp2_is_fatal(rv)) {
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
+ return APR_EGENERAL;
+ }
+ }
+
+ session->unsent_promises = 0;
+ session->unsent_submits = 0;
+
+ return APR_SUCCESS;
+}
+
+/**
+ * A stream was resumed as new output data arrived.
+ */
+static apr_status_t on_stream_resume(void *ctx, int stream_id)
+{
+ h2_session *session = ctx;
+ h2_stream *stream = get_stream(session, stream_id);
+ apr_status_t status = APR_SUCCESS;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): on_resume", session->id, stream_id);
+ if (stream) {
+ int rv = nghttp2_session_resume_data(session->ngh2, stream_id);
+ session->have_written = 1;
+ ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
+ APLOG_ERR : APLOG_DEBUG, 0, session->c,
+ APLOGNO(02936)
+ "h2_stream(%ld-%d): resuming %s",
+ session->id, stream->id, rv? nghttp2_strerror(rv) : "");
+ }
+ return status;
+}
+
+/**
+ * A response for the stream is ready.
+ */
+static apr_status_t on_stream_response(void *ctx, int stream_id)
+{
+ h2_session *session = ctx;
+ h2_stream *stream = get_stream(session, stream_id);
+ apr_status_t status = APR_SUCCESS;
+ h2_response *response;
+ int rv = 0;
+
+ AP_DEBUG_ASSERT(session);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): on_response", session->id, stream_id);
+ if (!stream) {
+ return APR_NOTFOUND;
+ }
+
+ response = h2_stream_get_response(stream);
+ AP_DEBUG_ASSERT(response || stream->rst_error);
+
+ if (stream->submitted) {
+ rv = NGHTTP2_PROTOCOL_ERROR;
+ }
+ else if (response && response->headers) {
+ nghttp2_data_provider provider, *pprovider = NULL;
+ h2_ngheader *ngh;
+ const h2_priority *prio;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03073)
+ "h2_stream(%ld-%d): submit response %d, REMOTE_WINDOW_SIZE=%u",
+ session->id, stream->id, response->http_status,
+ (unsigned int)nghttp2_session_get_stream_remote_window_size(session->ngh2, stream->id));
+
+ if (response->content_length != 0) {
+ memset(&provider, 0, sizeof(provider));
+ provider.source.fd = stream->id;
+ provider.read_callback = stream_data_cb;
+ pprovider = &provider;
+ }
+
+ /* If this stream is not a pushed one itself,
+ * and HTTP/2 server push is enabled here,
+ * and the response is in the range 200-299 *),
+ * and the remote side has pushing enabled,
+ * -> find and perform any pushes on this stream
+ * *before* we submit the stream response itself.
+ * This helps clients avoid opening new streams on Link
+ * headers that get pushed right afterwards.
+ *
+ * *) the response code is relevant, as we do not want to
+         *    make pushes on 401 or 403 codes, neither on 301/302
+ * and friends. And if we see a 304, we do not push either
+ * as the client, having this resource in its cache, might
+ * also have the pushed ones as well.
+ */
+ if (stream->request && !stream->request->initiated_on
+ && H2_HTTP_2XX(response->http_status)
+ && h2_session_push_enabled(session)) {
+
+ h2_stream_submit_pushes(stream);
+ }
+
+ prio = h2_stream_get_priority(stream);
+ if (prio) {
+ h2_session_set_prio(session, stream, prio);
+ /* no showstopper if that fails for some reason */
+ }
+
+ ngh = h2_util_ngheader_make_res(stream->pool, response->http_status,
+ response->headers);
+ rv = nghttp2_submit_response(session->ngh2, response->stream_id,
+ ngh->nv, ngh->nvlen, pprovider);
+ }
+ else {
+ int err = H2_STREAM_RST(stream, H2_ERR_PROTOCOL_ERROR);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03074)
+ "h2_stream(%ld-%d): RST_STREAM, err=%d",
+ session->id, stream->id, err);
+
+ rv = nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, err);
+ }
+
+ stream->submitted = 1;
+ session->have_written = 1;
+
+ if (stream->request && stream->request->initiated_on) {
+ ++session->pushes_submitted;
+ }
+ else {
+ ++session->responses_submitted;
+ }
+
+ if (nghttp2_is_fatal(rv)) {
+ status = APR_EGENERAL;
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ APLOGNO(02940) "submit_response: %s",
+ nghttp2_strerror(rv));
+ }
+
+ ++session->unsent_submits;
+
+ /* Unsent push promises are written immediately, as nghttp2
+ * 1.5.0 realizes internal stream data structures only on
+ * send and we might need them for other submits.
+ * Also, to conserve memory, we send at least every 10 submits
+ * so that nghttp2 does not buffer all outbound items too
+ * long.
+ */
+ if (status == APR_SUCCESS
+ && (session->unsent_promises || session->unsent_submits > 10)) {
+ status = h2_session_send(session);
+ }
+ return status;
+}
+
+static apr_status_t h2_session_receive(void *ctx, const char *data,
+ apr_size_t len, apr_size_t *readlen)
+{
+ h2_session *session = ctx;
+ ssize_t n;
+
+ if (len > 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session(%ld): feeding %ld bytes to nghttp2",
+ session->id, (long)len);
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, (int)n, nghttp2_strerror(n));
+ return APR_EGENERAL;
+ }
+ }
+ else {
+ *readlen = n;
+ session->io.bytes_read += n;
+ }
+ }
+ return APR_SUCCESS;
+}
+
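+/* Pull input from the connection filter chain; the installed H2_IN filter
+ * feeds everything it reads to h2_session_receive(). Return after roughly
+ * 64KB so that pending writes get a chance, or when we stop accepting
+ * streams. */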
+static apr_status_t h2_session_read(h2_session *session, int block)
+{
+ apr_status_t status, rstatus = APR_EAGAIN;
+ conn_rec *c = session->c;
+ apr_off_t read_start = session->io.bytes_read;
+
+ while (1) {
+ /* H2_IN filter handles all incoming data against the session.
+ * We just pull at the filter chain to make it happen */
+ status = ap_get_brigade(c->input_filters,
+ session->bbtmp, AP_MODE_READBYTES,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ /* get rid of any possible data we do not expect to get */
+ apr_brigade_cleanup(session->bbtmp);
+
+ switch (status) {
+ case APR_SUCCESS:
+ /* successful read, reset our idle timers */
+ rstatus = APR_SUCCESS;
+ if (block) {
+                    /* successful blocking read, try a non-blocking one
+                     * to get more. */
+ block = 0;
+ }
+ break;
+ case APR_EAGAIN:
+ return rstatus;
+ case APR_TIMEUP:
+ return status;
+ default:
+ if (session->io.bytes_read == read_start) {
+ /* first attempt failed */
+ if (APR_STATUS_IS_ETIMEDOUT(status)
+ || APR_STATUS_IS_ECONNABORTED(status)
+ || APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_EOF(status)
+ || APR_STATUS_IS_EBADF(status)) {
+ /* common status for a client that has left */
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
+ "h2_session(%ld): input gone", session->id);
+ }
+ else {
+ /* uncommon status, log on INFO so that we see this */
+ ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
+ APLOGNO(02950)
+ "h2_session(%ld): error reading, terminating",
+ session->id);
+ }
+ return status;
+ }
+ /* subsequent failure after success(es), return initial
+ * status. */
+ return rstatus;
+ }
+ if (!is_accepting_streams(session)) {
+ break;
+ }
+ if ((session->io.bytes_read - read_start) > (64*1024)) {
+ /* read enough in one go, give write a chance */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+ "h2_session(%ld): read 64k, returning", session->id);
+ break;
+ }
+ }
+ return rstatus;
+}
+
+static int unsubmitted_iter(void *ctx, void *val)
+{
+ h2_stream *stream = val;
+ if (h2_stream_needs_submit(stream)) {
+ *((int *)ctx) = 1;
+ return 0;
+ }
+ return 1;
+}
+
+static int has_unsubmitted_streams(h2_session *session)
+{
+ int has_unsubmitted = 0;
+ h2_ihash_iter(session->streams, unsubmitted_iter, &has_unsubmitted);
+ return has_unsubmitted;
+}
+
+static int suspended_iter(void *ctx, void *val)
+{
+ h2_stream *stream = val;
+ if (h2_stream_is_suspended(stream)) {
+ *((int *)ctx) = 1;
+ return 0;
+ }
+ return 1;
+}
+
+static int has_suspended_streams(h2_session *session)
+{
+ int has_suspended = 0;
+ h2_ihash_iter(session->streams, suspended_iter, &has_suspended);
+ return has_suspended;
+}
+
+static const char *StateNames[] = {
+ "INIT", /* H2_SESSION_ST_INIT */
+ "DONE", /* H2_SESSION_ST_DONE */
+ "IDLE", /* H2_SESSION_ST_IDLE */
+ "BUSY", /* H2_SESSION_ST_BUSY */
+ "WAIT", /* H2_SESSION_ST_WAIT */
+ "LSHUTDOWN", /* H2_SESSION_ST_LOCAL_SHUTDOWN */
+ "RSHUTDOWN", /* H2_SESSION_ST_REMOTE_SHUTDOWN */
+};
+
+static const char *state_name(h2_session_state state)
+{
+ if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
+ return "unknown";
+ }
+ return StateNames[state];
+}
+
+static int is_accepting_streams(h2_session *session)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
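+/* Update this connection's scoreboard entry with the current stream
+ * statistics, but only when status code or message actually changed. */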
+static void update_child_status(h2_session *session, int status, const char *msg)
+{
+ /* Assume that we also change code/msg when something really happened and
+ * avoid updating the scoreboard in between */
+ if (session->last_status_code != status
+ || session->last_status_msg != msg) {
+ apr_snprintf(session->status, sizeof(session->status),
+ "%s, streams: %d/%d/%d/%d/%d (open/recv/resp/push/rst)",
+ msg? msg : "-",
+ (int)session->open_streams,
+ (int)session->remote.emitted_count,
+ (int)session->responses_submitted,
+ (int)session->pushes_submitted,
+ (int)session->pushes_reset + session->streams_reset);
+ ap_update_child_status_descr(session->c->sbh, status, session->status);
+ }
+}
+
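+/* Move the session into a new state, logging the transition and updating
+ * the scoreboard for the states the MPM cares about. */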
+static void transit(h2_session *session, const char *action, h2_session_state nstate)
+{
+ if (session->state != nstate) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03078)
+ "h2_session(%ld): transit [%s] -- %s --> [%s]", session->id,
+ state_name(session->state), action, state_name(nstate));
+ session->state = nstate;
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ update_child_status(session, (session->open_streams == 0?
+ SERVER_BUSY_KEEPALIVE
+ : SERVER_BUSY_READ), "idle");
+ break;
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ update_child_status(session, SERVER_CLOSING, "remote goaway");
+ break;
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ update_child_status(session, SERVER_CLOSING, "local goaway");
+ break;
+ case H2_SESSION_ST_DONE:
+ update_child_status(session, SERVER_CLOSING, "done");
+ break;
+ default:
+ /* nop */
+ break;
+ }
+ }
+}
+
+static void h2_session_ev_init(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ transit(session, "init", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg)
+{
+ session->local.accepting = 0;
+ cleanup_streams(session);
+ switch (session->state) {
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* already did that? */
+ break;
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ /* all done */
+ transit(session, "local goaway", H2_SESSION_ST_DONE);
+ break;
+ default:
+ transit(session, "local goaway", H2_SESSION_ST_LOCAL_SHUTDOWN);
+ break;
+ }
+}
+
+static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char *msg)
+{
+ session->remote.accepting = 0;
+ cleanup_streams(session);
+ switch (session->state) {
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ /* already received that? */
+ break;
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* all done */
+ transit(session, "remote goaway", H2_SESSION_ST_DONE);
+ break;
+ default:
+ transit(session, "remote goaway", H2_SESSION_ST_REMOTE_SHUTDOWN);
+ break;
+ }
+}
+
+static void h2_session_ev_conn_error(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_DONE:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "conn error", H2_SESSION_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03401)
+ "h2_session(%ld): conn error -> shutdown", session->id);
+ h2_session_shutdown(session, arg, msg, 0);
+ break;
+ }
+}
+
+static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* just leave */
+ transit(session, "proto error", H2_SESSION_ST_DONE);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03402)
+ "h2_session(%ld): proto error -> shutdown", session->id);
+ h2_session_shutdown(session, arg, msg, 0);
+ break;
+ }
+}
+
+static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ transit(session, "conn timeout", H2_SESSION_ST_DONE);
+ break;
+ default:
+ h2_session_shutdown(session, arg, msg, 1);
+ transit(session, "conn timeout", H2_SESSION_ST_DONE);
+ break;
+ }
+}
+
+static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ /* Nothing to READ, nothing to WRITE on the master connection.
+ * Possible causes:
+             * - we wait for the client to send us something
+             * - we wait for started tasks to produce output
+             * - we have finished all streams and the client has sent GOAWAY
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session(%ld): NO_IO event, %d streams open",
+ session->id, session->open_streams);
+ if (session->open_streams > 0) {
+ if (has_unsubmitted_streams(session)
+ || has_suspended_streams(session)) {
+ /* waiting for at least one stream to produce data */
+ transit(session, "no io", H2_SESSION_ST_WAIT);
+ }
+ else {
+ /* we have streams open, and all are submitted and none
+ * is suspended. The only thing keeping us from WRITEing
+ * more must be the flow control.
+ * This means we only wait for WINDOW_UPDATE from the
+ * client and can block on READ. */
+ transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE);
+ session->idle_until = apr_time_now() + session->s->timeout;
+ session->keep_sync_until = session->idle_until;
+ /* Make sure we have flushed all previously written output
+ * so that the client will react. */
+ if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ return;
+ }
+ }
+ }
+ else if (is_accepting_streams(session)) {
+            /* When we have no streams, but still accept new ones, switch to idle */
+ apr_time_t now = apr_time_now();
+ transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE);
+ session->idle_until = (session->remote.emitted_count?
+ session->s->keep_alive_timeout :
+ session->s->timeout) + now;
+ session->keep_sync_until = now + apr_time_from_sec(1);
+ }
+ else {
+ /* We are no longer accepting new streams and there are
+ * none left. Time to leave. */
+ h2_session_shutdown(session, arg, msg, 0);
+ transit(session, "no io", H2_SESSION_ST_DONE);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_stream_ready(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_WAIT:
+ transit(session, "stream ready", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_data_read(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_WAIT:
+ transit(session, "data read", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ /* nop */
+ break;
+ default:
+ transit(session, "nghttp2 done", H2_SESSION_ST_DONE);
+ break;
+ }
+}
+
+static void h2_session_ev_mpm_stopping(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* nop */
+ break;
+ default:
+ h2_session_shutdown(session, arg, msg, 0);
+ break;
+ }
+}
+
+static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_DONE:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ /* nop */
+ break;
+ default:
+ h2_session_shutdown(session, arg, msg, 1);
+ break;
+ }
+}
+
+static void h2_session_ev_stream_open(h2_session *session, int arg, const char *msg)
+{
+ ++session->open_streams;
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ if (session->open_streams == 1) {
+                /* enter timeout, since we have a stream again */
+ session->idle_until = (session->s->timeout + apr_time_now());
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void h2_session_ev_stream_done(h2_session *session, int arg, const char *msg)
+{
+ --session->open_streams;
+ switch (session->state) {
+ case H2_SESSION_ST_IDLE:
+ if (session->open_streams == 0) {
+ /* enter keepalive timeout, since we no longer have streams */
+ session->idle_until = (session->s->keep_alive_timeout
+ + apr_time_now());
+ }
+ break;
+ default:
+ break;
+ }
+}
+
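+/* Dispatch an event to the session state machine; once the session reaches
+ * DONE, abort the multiplexer. */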
+static void dispatch_event(h2_session *session, h2_session_event_t ev,
+ int arg, const char *msg)
+{
+ switch (ev) {
+ case H2_SESSION_EV_INIT:
+ h2_session_ev_init(session, arg, msg);
+ break;
+ case H2_SESSION_EV_LOCAL_GOAWAY:
+ h2_session_ev_local_goaway(session, arg, msg);
+ break;
+ case H2_SESSION_EV_REMOTE_GOAWAY:
+ h2_session_ev_remote_goaway(session, arg, msg);
+ break;
+ case H2_SESSION_EV_CONN_ERROR:
+ h2_session_ev_conn_error(session, arg, msg);
+ break;
+ case H2_SESSION_EV_PROTO_ERROR:
+ h2_session_ev_proto_error(session, arg, msg);
+ break;
+ case H2_SESSION_EV_CONN_TIMEOUT:
+ h2_session_ev_conn_timeout(session, arg, msg);
+ break;
+ case H2_SESSION_EV_NO_IO:
+ h2_session_ev_no_io(session, arg, msg);
+ break;
+ case H2_SESSION_EV_STREAM_READY:
+ h2_session_ev_stream_ready(session, arg, msg);
+ break;
+ case H2_SESSION_EV_DATA_READ:
+ h2_session_ev_data_read(session, arg, msg);
+ break;
+ case H2_SESSION_EV_NGH2_DONE:
+ h2_session_ev_ngh2_done(session, arg, msg);
+ break;
+ case H2_SESSION_EV_MPM_STOPPING:
+ h2_session_ev_mpm_stopping(session, arg, msg);
+ break;
+ case H2_SESSION_EV_PRE_CLOSE:
+ h2_session_ev_pre_close(session, arg, msg);
+ break;
+ case H2_SESSION_EV_STREAM_OPEN:
+ h2_session_ev_stream_open(session, arg, msg);
+ break;
+ case H2_SESSION_EV_STREAM_DONE:
+ h2_session_ev_stream_done(session, arg, msg);
+ break;
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_session(%ld): unknown event %d",
+ session->id, ev);
+ break;
+ }
+
+ if (session->state == H2_SESSION_ST_DONE) {
+ h2_mplx_abort(session->mplx);
+ }
+}
+
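+/* Upper bound for the exponential backoff used in the H2_SESSION_ST_WAIT
+ * state below: wait_us doubles per round up to this many microseconds. */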
+static const int MAX_WAIT_MICROS = 200 * 1000;
+
+apr_status_t h2_session_process(h2_session *session, int async)
+{
+ apr_status_t status = APR_SUCCESS;
+ conn_rec *c = session->c;
+ int rv, mpm_state, trace = APLOGctrace3(c);
+
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): process start, async=%d",
+ session->id, async);
+ }
+
+ if (c->cs) {
+ c->cs->state = CONN_STATE_WRITE_COMPLETION;
+ }
+
+ while (1) {
+ trace = APLOGctrace3(c);
+ session->have_read = session->have_written = 0;
+
+ if (!ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ if (mpm_state == AP_MPMQ_STOPPING) {
+ dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL);
+ break;
+ }
+ }
+
+ session->status[0] = '\0';
+
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+ if (!h2_is_acceptable_connection(c, 1)) {
+ update_child_status(session, SERVER_BUSY_READ, "inadequate security");
+ h2_session_shutdown(session, NGHTTP2_INADEQUATE_SECURITY, NULL, 1);
+ }
+ else {
+ update_child_status(session, SERVER_BUSY_READ, "init");
+ status = h2_session_start(session, &rv);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03079)
+ "h2_session(%ld): started on %s:%d", session->id,
+ session->s->server_hostname,
+ c->local_addr->port);
+ if (status != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL);
+ }
+ break;
+
+ case H2_SESSION_ST_IDLE:
+            /* make certain we send everything before we idle */
+ if (!session->keep_sync_until && async && !session->open_streams
+ && !session->r && session->remote.emitted_count) {
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): async idle, nonblock read, "
+ "%d streams open", session->id,
+ session->open_streams);
+ }
+ /* We do not return to the async mpm immediately, since under
+ * load, mpms show the tendency to throw keep_alive connections
+ * away very rapidly.
+ * So, if we are still processing streams, we wait for the
+ * normal timeout first and, on timeout, close.
+ * If we have no streams, we still wait a short amount of
+ * time here for the next frame to arrive, before handing
+ * it to keep_alive processing of the mpm.
+ */
+ status = h2_session_read(session, 0);
+
+ if (status == APR_SUCCESS) {
+ session->have_read = 1;
+ dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
+ }
+ else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
+ if (apr_time_now() > session->idle_until) {
+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
+ }
+ else {
+ status = APR_EAGAIN;
+ goto out;
+ }
+ }
+ else {
+ ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
+ APLOGNO(03403)
+ "h2_session(%ld): idle, no data, error",
+ session->id);
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "timeout");
+ }
+ }
+ else {
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): sync idle, stutter 1-sec, "
+ "%d streams open", session->id,
+ session->open_streams);
+ }
+ /* We wait in smaller increments, using a 1 second timeout.
+ * That gives us the chance to check for MPMQ_STOPPING often.
+ */
+ status = h2_mplx_idle(session->mplx);
+ if (status != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_ENHANCE_YOUR_CALM, "less is more");
+ }
+ h2_filter_cin_timeout_set(session->cin, apr_time_from_sec(1));
+ status = h2_session_read(session, 1);
+ if (status == APR_SUCCESS) {
+ session->have_read = 1;
+ dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
+ }
+ else if (status == APR_EAGAIN) {
+ /* nothing to read */
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ apr_time_t now = apr_time_now();
+ if (now > session->keep_sync_until) {
+ /* if we are on an async mpm, now is the time that
+ * we may dare to pass control to it. */
+ session->keep_sync_until = 0;
+ }
+ if (now > session->idle_until) {
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): keepalive timeout",
+ session->id);
+ }
+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
+ }
+ else if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): keepalive, %f sec left",
+ session->id, (session->idle_until - now) / 1000000.0f);
+ }
+                    /* continue read handling */
+ }
+ else {
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): idle(1 sec timeout) "
+ "read failed", session->id);
+ }
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error");
+ }
+ }
+
+ break;
+
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_LOCAL_SHUTDOWN:
+ case H2_SESSION_ST_REMOTE_SHUTDOWN:
+ if (nghttp2_session_want_read(session->ngh2)) {
+ ap_update_child_status(session->c->sbh, SERVER_BUSY_READ, NULL);
+ h2_filter_cin_timeout_set(session->cin, session->s->timeout);
+ status = h2_session_read(session, 0);
+ if (status == APR_SUCCESS) {
+ session->have_read = 1;
+ dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
+ }
+ else if (status == APR_EAGAIN) {
+ /* nothing to read */
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
+ break;
+ }
+ else {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ }
+
+ /* trigger window updates, stream resumes and submits */
+ status = h2_mplx_dispatch_master_events(session->mplx,
+ on_stream_resume,
+ on_stream_response,
+ session);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): dispatch error",
+ session->id);
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR,
+ "dispatch error");
+ break;
+ }
+
+ if (nghttp2_session_want_write(session->ngh2)) {
+ ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
+ status = h2_session_send(session);
+ if (status != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "writing");
+ break;
+ }
+ }
+
+ if (session->have_read || session->have_written) {
+ if (session->wait_us) {
+ session->wait_us = 0;
+ }
+ }
+ else if (!nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_SESSION_EV_NO_IO, 0, NULL);
+ }
+ break;
+
+ case H2_SESSION_ST_WAIT:
+ if (session->wait_us <= 0) {
+ session->wait_us = 10;
+ session->start_wait = apr_time_now();
+ if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ break;
+ }
+ }
+ else if ((apr_time_now() - session->start_wait) >= session->s->timeout) {
+ /* waited long enough */
+ if (trace) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, APR_TIMEUP, c,
+ "h2_session: wait for data");
+ }
+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
+ break;
+ }
+ else {
+ /* repeating, increase timer for graceful backoff */
+ session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS);
+ }
+
+ if (trace) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
+ "h2_session: wait for data, %ld micros",
+ (long)session->wait_us);
+ }
+ status = h2_mplx_out_trywait(session->mplx, session->wait_us,
+ session->iowait);
+ if (status == APR_SUCCESS) {
+ session->wait_us = 0;
+ dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
+ }
+ else if (APR_STATUS_IS_TIMEUP(status)) {
+ /* go back to checking all inputs again */
+ transit(session, "wait cycle", session->local.accepting?
+ H2_SESSION_ST_BUSY : H2_SESSION_ST_LOCAL_SHUTDOWN);
+ }
+ else if (APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_ECONNABORTED(status)) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c,
+ APLOGNO(03404)
+ "h2_session(%ld): waiting on conditional",
+ session->id);
+ h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR,
+ "cond wait error", 0);
+ }
+ break;
+
+ case H2_SESSION_ST_DONE:
+ status = APR_EOF;
+ goto out;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(03080)
+ "h2_session(%ld): unknown state %d", session->id, session->state);
+ dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL);
+ break;
+ }
+
+ if (!nghttp2_session_want_read(session->ngh2)
+ && !nghttp2_session_want_write(session->ngh2)) {
+ dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
+ }
+ if (session->reprioritize) {
+ h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session);
+ session->reprioritize = 0;
+ }
+ }
+
+out:
+ if (trace) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ "h2_session(%ld): [%s] process returns",
+ session->id, state_name(session->state));
+ }
+
+ if ((session->state != H2_SESSION_ST_DONE)
+ && (APR_STATUS_IS_EOF(status)
+ || APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_ECONNABORTED(status))) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+
+ status = (session->state == H2_SESSION_ST_DONE)? APR_EOF : APR_SUCCESS;
+ if (session->state == H2_SESSION_ST_DONE) {
+ if (!session->eoc_written) {
+ session->eoc_written = 1;
+ h2_conn_io_write_eoc(&session->io, session);
+ }
+ }
+
+ return status;
+}
+
+apr_status_t h2_session_pre_close(h2_session *session, int async)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ "h2_session(%ld): pre_close", session->id);
+ dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0, "timeout");
+ return APR_SUCCESS;
+}
diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
new file mode 100644
index 00000000..c5c5b7ae
--- /dev/null
+++ b/modules/http2/h2_session.h
@@ -0,0 +1,241 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_session__
+#define __mod_h2__h2_session__
+
+#include "h2_conn_io.h"
+
+/**
+ * An HTTP/2 connection, a session with a specific client.
+ *
+ * h2_session sits on top of a httpd conn_rec* instance and takes complete
+ * control of the connection data. It receives protocol frames from the
+ * client. For new HTTP/2 streams it creates h2_task(s) that are sent
+ * via callback to a dispatcher (see h2_conn.c).
+ * h2_session keeps h2_io instances for each ongoing stream which buffer
+ * the payload for that stream.
+ *
+ * New incoming HEADER frames are converted into an h2_stream+h2_task pair
+ * that both represent an HTTP/2 stream, but may have separate lifetimes.
+ * This allows h2_task to be scheduled in other threads without semaphores
+ * all over the place. It allows task memory to be freed independent of
+ * session lifetime, and sessions may close down while tasks are still running.
+ */
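+
+/*
+ * Rough lifecycle sketch (illustrative only, simplified; not lifted from the
+ * actual callers, error handling omitted):
+ *
+ *     h2_session *session = h2_session_create(c, ctx, workers);
+ *     status = h2_session_process(session, async);   // runs until DONE/error
+ *     h2_session_pre_close(session, async);          // before the conn closes
+ */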
+
+#include "h2.h"
+
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_ctx;
+struct h2_config;
+struct h2_filter_cin;
+struct h2_ihash_t;
+struct h2_mplx;
+struct h2_priority;
+struct h2_push;
+struct h2_push_diary;
+struct h2_response;
+struct h2_session;
+struct h2_stream;
+struct h2_task;
+struct h2_workers;
+
+struct nghttp2_session;
+
+typedef enum {
+ H2_SESSION_EV_INIT, /* session was initialized */
+    H2_SESSION_EV_LOCAL_GOAWAY,  /* we sent a GOAWAY */
+    H2_SESSION_EV_REMOTE_GOAWAY, /* remote sent us a GOAWAY */
+ H2_SESSION_EV_CONN_ERROR, /* connection error */
+ H2_SESSION_EV_PROTO_ERROR, /* protocol error */
+ H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */
+ H2_SESSION_EV_NO_IO, /* nothing has been read or written */
+ H2_SESSION_EV_STREAM_READY, /* stream signalled availability of headers/data */
+ H2_SESSION_EV_DATA_READ, /* connection data has been read */
+    H2_SESSION_EV_NGH2_DONE,     /* nghttp2 wants to neither read nor write anything */
+ H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */
+ H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */
+ H2_SESSION_EV_STREAM_OPEN, /* stream has been opened */
+ H2_SESSION_EV_STREAM_DONE, /* stream has been handled completely */
+} h2_session_event_t;
+
+typedef struct h2_session {
+ long id; /* identifier of this session, unique
+ * inside a httpd process */
+ conn_rec *c; /* the connection this session serves */
+ request_rec *r; /* the request that started this in case
+ * of 'h2c', NULL otherwise */
+ server_rec *s; /* server/vhost we're starting on */
+ const struct h2_config *config; /* Relevant config for this session */
+ apr_pool_t *pool; /* pool to use in session */
+ struct h2_mplx *mplx; /* multiplexer for stream data */
+ struct h2_workers *workers; /* for executing stream tasks */
+ struct h2_filter_cin *cin; /* connection input filter context */
+ h2_conn_io io; /* io on httpd conn filters */
+ struct h2_ihash_t *streams; /* streams handled by this session */
+ struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
+
+ h2_session_state state; /* state session is in */
+
+ h2_session_props local; /* properties of local session */
+    h2_session_props remote;        /* properties of remote session */
+
+ unsigned int reprioritize : 1; /* scheduled streams priority changed */
+ unsigned int eoc_written : 1; /* h2 eoc bucket written */
+ unsigned int flush : 1; /* flushing output necessary */
+ unsigned int have_read : 1; /* session has read client data */
+ unsigned int have_written : 1; /* session did write data to client */
+    apr_interval_time_t wait_us;    /* timeout during BUSY_WAIT state, microseconds */
+
+ struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
+
+ int open_streams; /* number of streams open */
+ int unsent_submits; /* number of submitted, but not yet written responses. */
+    int unsent_promises;            /* number of submitted, but not yet written push promises */
+
+ int responses_submitted; /* number of http/2 responses submitted */
+ int streams_reset; /* number of http/2 streams reset by client */
+ int pushes_promised; /* number of http/2 push promises submitted */
+ int pushes_submitted; /* number of http/2 pushed responses submitted */
+    int pushes_reset;               /* number of http/2 pushes reset by client */
+
+ apr_size_t frames_received; /* number of http/2 frames received */
+ apr_size_t frames_sent; /* number of http/2 frames sent */
+
+ apr_size_t max_stream_count; /* max number of open streams */
+ apr_size_t max_stream_mem; /* max buffer memory for a single stream */
+
+    apr_time_t start_wait;          /* Time we started waiting for something to happen */
+ apr_time_t idle_until; /* Time we shut down due to sheer boredom */
+ apr_time_t keep_sync_until; /* Time we sync wait until passing to async mpm */
+
+ apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */
+ struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */
+
+ char status[64]; /* status message for scoreboard */
+ int last_status_code; /* the one already reported */
+ const char *last_status_msg; /* the one already reported */
+} h2_session;
+
+
+/**
+ * Create a new h2_session for the given connection.
+ * The session will apply the configured parameters.
+ * @param c the connection to work on
+ * @param ctx the h2 context of this connection
+ * @param workers the worker pool to use
+ * @return the created session
+ */
+h2_session *h2_session_create(conn_rec *c, struct h2_ctx *ctx,
+ struct h2_workers *workers);
+
+/**
+ * Create a new h2_session for the given request.
+ * The session will apply the configured parameters.
+ * @param r the request that was upgraded
+ * @param ctx the h2 context of this request's connection
+ * @param workers the worker pool to use
+ * @return the created session
+ */
+h2_session *h2_session_rcreate(request_rec *r, struct h2_ctx *ctx,
+ struct h2_workers *workers);
+
+/**
+ * Process the given HTTP/2 session until it is ended or a fatal
+ * error occurred.
+ *
+ * @param session the session to process
+ * @param async != 0 if the connection is handled by an async MPM
+ */
+apr_status_t h2_session_process(h2_session *session, int async);
+
+/**
+ * Last chance to do anything before the connection is closed.
+ */
+apr_status_t h2_session_pre_close(h2_session *session, int async);
+
+/**
+ * Cleanup the session and all objects it still contains. This will not
+ * destroy h2_task instances that have not finished yet.
+ * @param session the session to destroy
+ */
+void h2_session_eoc_callback(h2_session *session);
+
+/**
+ * Called when a serious error occurred and the session needs to terminate
+ * without further connection io.
+ * @param session the session to abort
+ * @param reason the apache status that caused the abort
+ */
+void h2_session_abort(h2_session *session, apr_status_t reason);
+
+/**
+ * Close and deallocate the given session.
+ */
+void h2_session_close(h2_session *session);
+
+/* Start submitting the response to a stream request. This is possible
+ * once we have all the response headers. */
+apr_status_t h2_session_handle_response(h2_session *session,
+ struct h2_stream *stream);
+
+/**
+ * Create and register a new stream under the given id.
+ *
+ * @param session the session to register in
+ * @param stream_id the new stream identifier
+ * @param initiated_on the stream id this one is initiated on or 0
+ * @param req the request for this stream or NULL if not known yet
+ * @return the new stream
+ */
+struct h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
+ int initiated_on,
+ const h2_request *req);
+
+
+/**
+ * Returns whether the client settings have push enabled.
+ * @return != 0 iff push is enabled in the client settings
+ */
+int h2_session_push_enabled(h2_session *session);
+
+/**
+ * Destroy the stream and release it everywhere. Reclaim all resources.
+ * @param session the session to which the stream belongs
+ * @param stream the stream to destroy
+ */
+apr_status_t h2_session_stream_done(h2_session *session,
+ struct h2_stream *stream);
+
+/**
+ * Submit a push promise on the stream and schedule the new stream for
+ * processing.
+ *
+ * @param session the session to work in
+ * @param is the stream initiating the push
+ * @param push the push to promise
+ * @return the new promised stream or NULL
+ */
+struct h2_stream *h2_session_push(h2_session *session,
+ struct h2_stream *is, struct h2_push *push);
+
+apr_status_t h2_session_set_prio(h2_session *session,
+ struct h2_stream *stream,
+ const struct h2_priority *prio);
+
+
+#endif /* defined(__mod_h2__h2_session__) */
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
new file mode 100644
index 00000000..a7a67641
--- /dev/null
+++ b/modules/http2/h2_stream.c
@@ -0,0 +1,697 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_log.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_conn.h"
+#include "h2_config.h"
+#include "h2_h2.h"
+#include "h2_filter.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_response.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_task.h"
+#include "h2_ctx.h"
+#include "h2_task.h"
+#include "h2_util.h"
+
+
+static int state_transition[][7] = {
+ /* ID OP RL RR CI CO CL */
+/*ID*/{ 1, 0, 0, 0, 0, 0, 0 },
+/*OP*/{ 1, 1, 0, 0, 0, 0, 0 },
+/*RL*/{ 0, 0, 1, 0, 0, 0, 0 },
+/*RR*/{ 0, 0, 0, 1, 0, 0, 0 },
+/*CI*/{ 1, 1, 0, 0, 1, 0, 0 },
+/*CO*/{ 1, 1, 0, 0, 0, 1, 0 },
+/*CL*/{ 1, 1, 0, 0, 1, 1, 1 },
+};
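+/* set_state() below indexes this as state_transition[new_state][current_state];
+ * a non-zero entry means the transition is allowed (rows are the target state,
+ * columns the current state, in the ID..CL order labelled above). */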
+
+static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, char *tag)
+{
+ if (APLOG_C_IS_LEVEL(s->session->c, lvl)) {
+ conn_rec *c = s->session->c;
+ char buffer[4 * 1024];
+ const char *line = "(null)";
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
+
+ len = h2_util_bb_print(buffer, bmax, tag, "", s->buffer);
+ ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%ld-%d): %s",
+ c->id, s->id, len? buffer : line);
+ }
+}
+
+static int set_state(h2_stream *stream, h2_stream_state_t state)
+{
+ int allowed = state_transition[state][stream->state];
+ if (allowed) {
+ stream->state = state;
+ return 1;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, APLOGNO(03081)
+ "h2_stream(%ld-%d): invalid state transition from %d to %d",
+ stream->session->id, stream->id, stream->state, state);
+ return 0;
+}
+
+static int close_input(h2_stream *stream)
+{
+ switch (stream->state) {
+ case H2_STREAM_ST_CLOSED_INPUT:
+ case H2_STREAM_ST_CLOSED:
+ return 0; /* ignore, idempotent */
+ case H2_STREAM_ST_CLOSED_OUTPUT:
+ /* both closed now */
+ set_state(stream, H2_STREAM_ST_CLOSED);
+ break;
+ default:
+ /* everything else we jump to here */
+ set_state(stream, H2_STREAM_ST_CLOSED_INPUT);
+ break;
+ }
+ return 1;
+}
+
+static int input_closed(h2_stream *stream)
+{
+ switch (stream->state) {
+ case H2_STREAM_ST_OPEN:
+ case H2_STREAM_ST_CLOSED_OUTPUT:
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static int close_output(h2_stream *stream)
+{
+ switch (stream->state) {
+ case H2_STREAM_ST_CLOSED_OUTPUT:
+ case H2_STREAM_ST_CLOSED:
+ return 0; /* ignore, idempotent */
+ case H2_STREAM_ST_CLOSED_INPUT:
+ /* both closed now */
+ set_state(stream, H2_STREAM_ST_CLOSED);
+ break;
+ default:
+ /* everything else we jump to here */
+ set_state(stream, H2_STREAM_ST_CLOSED_OUTPUT);
+ break;
+ }
+ return 1;
+}
+
+static int input_open(const h2_stream *stream)
+{
+ switch (stream->state) {
+ case H2_STREAM_ST_OPEN:
+ case H2_STREAM_ST_CLOSED_OUTPUT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int output_open(h2_stream *stream)
+{
+ switch (stream->state) {
+ case H2_STREAM_ST_OPEN:
+ case H2_STREAM_ST_CLOSED_INPUT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static apr_status_t stream_pool_cleanup(void *ctx)
+{
+ h2_stream *stream = ctx;
+ apr_status_t status;
+
+ if (stream->input) {
+ h2_beam_destroy(stream->input);
+ stream->input = NULL;
+ }
+ if (stream->files) {
+ apr_file_t *file;
+ int i;
+ for (i = 0; i < stream->files->nelts; ++i) {
+ file = APR_ARRAY_IDX(stream->files, i, apr_file_t*);
+ status = apr_file_close(file);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, stream->session->c,
+ "h2_stream(%ld-%d): destroy, closed file %d",
+ stream->session->id, stream->id, i);
+ }
+ stream->files = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
+ int initiated_on, const h2_request *creq)
+{
+ h2_request *req;
+ h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
+
+ stream->id = id;
+ stream->state = H2_STREAM_ST_IDLE;
+ stream->pool = pool;
+ stream->session = session;
+ set_state(stream, H2_STREAM_ST_OPEN);
+
+ if (creq) {
+        /* take it into our pool and assure correct ids */
+ req = h2_request_clone(pool, creq);
+ req->id = id;
+ req->initiated_on = initiated_on;
+ }
+ else {
+ req = h2_req_create(id, pool,
+ h2_config_geti(session->config, H2_CONF_SER_HEADERS));
+ }
+ stream->request = req;
+
+ apr_pool_cleanup_register(pool, stream, stream_pool_cleanup,
+ apr_pool_cleanup_null);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03082)
+ "h2_stream(%ld-%d): opened", session->id, stream->id);
+ return stream;
+}
+
+void h2_stream_cleanup(h2_stream *stream)
+{
+ AP_DEBUG_ASSERT(stream);
+ if (stream->buffer) {
+ apr_brigade_cleanup(stream->buffer);
+ }
+ if (stream->input) {
+ apr_status_t status;
+ status = h2_beam_shutdown(stream->input, APR_NONBLOCK_READ, 1);
+ if (status == APR_EAGAIN) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_stream(%ld-%d): wait on input shutdown",
+ stream->session->id, stream->id);
+ status = h2_beam_shutdown(stream->input, APR_BLOCK_READ, 1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
+ "h2_stream(%ld-%d): input shutdown returned",
+ stream->session->id, stream->id);
+ }
+ }
+}
+
+void h2_stream_destroy(h2_stream *stream)
+{
+ AP_DEBUG_ASSERT(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c,
+ "h2_stream(%ld-%d): destroy",
+ stream->session->id, stream->id);
+ if (stream->pool) {
+ apr_pool_destroy(stream->pool);
+ }
+}
+
+void h2_stream_eos_destroy(h2_stream *stream)
+{
+ h2_session_stream_done(stream->session, stream);
+ /* stream possibly destroyed */
+}
+
+apr_pool_t *h2_stream_detach_pool(h2_stream *stream)
+{
+ apr_pool_t *pool = stream->pool;
+ stream->pool = NULL;
+ return pool;
+}
+
+void h2_stream_rst(h2_stream *stream, int error_code)
+{
+ stream->rst_error = error_code;
+ close_input(stream);
+ close_output(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): reset, error=%d",
+ stream->session->id, stream->id, error_code);
+}
+
+struct h2_response *h2_stream_get_response(h2_stream *stream)
+{
+ return stream->response;
+}
+
+apr_status_t h2_stream_set_request(h2_stream *stream, request_rec *r)
+{
+ apr_status_t status;
+ AP_DEBUG_ASSERT(stream);
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+ set_state(stream, H2_STREAM_ST_OPEN);
+ status = h2_request_rwrite(stream->request, stream->pool, r);
+ stream->request->serialize = h2_config_geti(h2_config_rget(r),
+ H2_CONF_SER_HEADERS);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03058)
+ "h2_request(%d): rwrite %s host=%s://%s%s",
+ stream->request->id, stream->request->method,
+ stream->request->scheme, stream->request->authority,
+ stream->request->path);
+
+ return status;
+}
+
+apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ AP_DEBUG_ASSERT(stream);
+ if (!stream->response) {
+ if (name[0] == ':') {
+ if ((vlen) > stream->session->s->limit_req_line) {
+ /* pseudo header: approximation of request line size check */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): pseudo header %s too long",
+ stream->session->id, stream->id, name);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_URI_TOO_LARGE);
+ }
+ }
+ else if ((nlen + 2 + vlen) > stream->session->s->limit_req_fieldsize) {
+ /* header too long */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): header %s too long",
+ stream->session->id, stream->id, name);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+
+ if (name[0] != ':') {
+ ++stream->request_headers_added;
+ if (stream->request_headers_added
+ > stream->session->s->limit_req_fields) {
+ /* too many header lines */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): too many header lines",
+ stream->session->id, stream->id);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+ }
+ }
+
+ if (h2_stream_is_scheduled(stream)) {
+ return h2_request_add_trailer(stream->request, stream->pool,
+ name, nlen, value, vlen);
+ }
+ else {
+ if (!input_open(stream)) {
+ return APR_ECONNRESET;
+ }
+ return h2_request_add_header(stream->request, stream->pool,
+ name, nlen, value, vlen);
+ }
+}
+
+apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
+ h2_stream_pri_cmp *cmp, void *ctx)
+{
+ apr_status_t status;
+ AP_DEBUG_ASSERT(stream);
+ AP_DEBUG_ASSERT(stream->session);
+ AP_DEBUG_ASSERT(stream->session->mplx);
+
+ if (!output_open(stream)) {
+ return APR_ECONNRESET;
+ }
+ if (stream->scheduled) {
+ return APR_EINVAL;
+ }
+ if (eos) {
+ close_input(stream);
+ }
+
+ if (stream->response) {
+        /* already have a response, probably an HTTP error code */
+ return h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
+ }
+
+ /* Seeing the end-of-headers, we have everything we need to
+ * start processing it.
+ */
+ status = h2_request_end_headers(stream->request, stream->pool,
+ eos, push_enabled);
+ if (status == APR_SUCCESS) {
+ stream->request->body = !eos;
+ stream->scheduled = 1;
+ stream->input_remaining = stream->request->content_length;
+
+ status = h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): scheduled %s %s://%s%s",
+ stream->session->id, stream->id,
+ stream->request->method, stream->request->scheme,
+ stream->request->authority, stream->request->path);
+ }
+ else {
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
+ "h2_stream(%ld-%d): RST=2 (internal err) %s %s://%s%s",
+ stream->session->id, stream->id,
+ stream->request->method, stream->request->scheme,
+ stream->request->authority, stream->request->path);
+ }
+
+ return status;
+}
+
+int h2_stream_is_scheduled(const h2_stream *stream)
+{
+ return stream->scheduled;
+}
+
+apr_status_t h2_stream_close_input(h2_stream *stream)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ AP_DEBUG_ASSERT(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): closing input",
+ stream->session->id, stream->id);
+
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+
+ if (close_input(stream) && stream->input) {
+ status = h2_beam_close(stream->input);
+ }
+ return status;
+}
+
+apr_status_t h2_stream_write_data(h2_stream *stream,
+ const char *data, size_t len, int eos)
+{
+ conn_rec *c = stream->session->c;
+ apr_status_t status = APR_SUCCESS;
+
+ AP_DEBUG_ASSERT(stream);
+ if (!stream->input) {
+ return APR_EOF;
+ }
+ if (input_closed(stream) || !stream->request->eoh) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_stream(%ld-%d): writing denied, closed=%d, eoh=%d",
+ stream->session->id, stream->id, input_closed(stream),
+ stream->request->eoh);
+ return APR_EINVAL;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_stream(%ld-%d): add %ld input bytes",
+ stream->session->id, stream->id, (long)len);
+
+ if (!stream->request->chunked) {
+ stream->input_remaining -= len;
+ if (stream->input_remaining < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
+ APLOGNO(02961)
+                          "h2_stream(%ld-%d): got %ld more content bytes than "
+                          "announced in content-length header (%ld)",
+                          stream->session->id, stream->id,
+                          -(long)stream->input_remaining,
+                          (long)stream->request->content_length);
+ h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
+ return APR_ECONNABORTED;
+ }
+ }
+
+ if (!stream->tmp) {
+ stream->tmp = apr_brigade_create(stream->pool, c->bucket_alloc);
+ }
+ apr_brigade_write(stream->tmp, NULL, NULL, data, len);
+ if (eos) {
+ APR_BRIGADE_INSERT_TAIL(stream->tmp,
+ apr_bucket_eos_create(c->bucket_alloc));
+ close_input(stream);
+ }
+
+ status = h2_beam_send(stream->input, stream->tmp, APR_BLOCK_READ);
+ apr_brigade_cleanup(stream->tmp);
+ return status;
+}
+
+void h2_stream_set_suspended(h2_stream *stream, int suspended)
+{
+ AP_DEBUG_ASSERT(stream);
+ stream->suspended = !!suspended;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ "h2_stream(%ld-%d): suspended=%d",
+ stream->session->id, stream->id, stream->suspended);
+}
+
+int h2_stream_is_suspended(const h2_stream *stream)
+{
+ AP_DEBUG_ASSERT(stream);
+ return stream->suspended;
+}
+
+static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
+{
+ conn_rec *c = stream->session->c;
+ apr_bucket *b;
+ apr_status_t status;
+
+ if (!stream->output) {
+ return APR_EOF;
+ }
+ status = h2_beam_receive(stream->output, stream->buffer,
+ APR_NONBLOCK_READ, amount);
+    /* The buckets we receive use the stream->buffer pool for their
+     * lifetime, which is exactly what we want, since this is stream->pool.
+     *
+     * However: when we send these buckets down the core output filters, a
+     * filter might decide to set them aside into a pool of its own. And it
+     * might decide, after having sent the buckets, to clear its pool.
+     *
+     * This is problematic for file buckets because that closes the contained
+     * file. Any split-off buckets we send afterwards will then result in an
+     * APR_EBADF.
+     */
+ for (b = APR_BRIGADE_FIRST(stream->buffer);
+ b != APR_BRIGADE_SENTINEL(stream->buffer);
+ b = APR_BUCKET_NEXT(b)) {
+ if (APR_BUCKET_IS_FILE(b)) {
+ apr_bucket_file *f = (apr_bucket_file *)b->data;
+ apr_pool_t *fpool = apr_file_pool_get(f->fd);
+ if (fpool != c->pool) {
+ apr_bucket_setaside(b, c->pool);
+ if (!stream->files) {
+ stream->files = apr_array_make(stream->pool,
+ 5, sizeof(apr_file_t*));
+ }
+ APR_ARRAY_PUSH(stream->files, apr_file_t*) = f->fd;
+ }
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response,
+ h2_bucket_beam *output)
+{
+ apr_status_t status = APR_SUCCESS;
+ conn_rec *c = stream->session->c;
+
+ if (!output_open(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_stream(%ld-%d): output closed",
+ stream->session->id, stream->id);
+ return APR_ECONNRESET;
+ }
+
+ stream->response = response;
+ stream->output = output;
+ stream->buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
+
+ h2_stream_filter(stream);
+ if (stream->output) {
+ status = fill_buffer(stream, 0);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
+ "h2_stream(%ld-%d): set_response(%d)",
+ stream->session->id, stream->id,
+ stream->response->http_status);
+ return status;
+}
+
+apr_status_t h2_stream_set_error(h2_stream *stream, int http_status)
+{
+ h2_response *response;
+
+ if (stream->submitted) {
+ return APR_EINVAL;
+ }
+ response = h2_response_die(stream->id, http_status, stream->request,
+ stream->pool);
+ return h2_stream_set_response(stream, response, NULL);
+}
+
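+/* Response bytes we try to have ready per DATA frame: roughly a 16 KB frame
+ * payload minus the 9 byte HTTP/2 frame header and some headroom. */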
+static const apr_size_t DATA_CHUNK_SIZE = ((16*1024) - 100 - 9);
+
+apr_status_t h2_stream_out_prepare(h2_stream *stream,
+ apr_off_t *plen, int *peos)
+{
+ conn_rec *c = stream->session->c;
+ apr_status_t status = APR_SUCCESS;
+ apr_off_t requested;
+
+ if (stream->rst_error) {
+ *plen = 0;
+ *peos = 1;
+ return APR_ECONNRESET;
+ }
+
+ if (*plen > 0) {
+ requested = H2MIN(*plen, DATA_CHUNK_SIZE);
+ }
+ else {
+ requested = DATA_CHUNK_SIZE;
+ }
+ *plen = requested;
+
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_pre");
+ h2_util_bb_avail(stream->buffer, plen, peos);
+ if (!*peos && *plen < requested) {
+ /* try to get more data */
+ status = fill_buffer(stream, (requested - *plen) + DATA_CHUNK_SIZE);
+ if (APR_STATUS_IS_EOF(status)) {
+ apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->buffer, eos);
+ status = APR_SUCCESS;
+ }
+ else if (status == APR_EAGAIN) {
+ /* did not receive more, it's ok */
+ status = APR_SUCCESS;
+ }
+ *plen = requested;
+ h2_util_bb_avail(stream->buffer, plen, peos);
+ }
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_post");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
+ "h2_stream(%ld-%d): prepare, len=%ld eos=%d, trailers=%s",
+ c->id, stream->id, (long)*plen, *peos,
+ (stream->response && stream->response->trailers)?
+ "yes" : "no");
+ if (!*peos && !*plen && status == APR_SUCCESS) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos)
+{
+ conn_rec *c = stream->session->c;
+ apr_status_t status = APR_SUCCESS;
+
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+ status = h2_append_brigade(bb, stream->buffer, plen, peos);
+ if (status == APR_SUCCESS && !*peos && !*plen) {
+ status = APR_EAGAIN;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+ "h2_stream(%ld-%d): read_to, len=%ld eos=%d",
+ c->id, stream->id, (long)*plen, *peos);
+ return status;
+}
+
+
+int h2_stream_input_is_open(const h2_stream *stream)
+{
+ return input_open(stream);
+}
+
+int h2_stream_needs_submit(const h2_stream *stream)
+{
+ switch (stream->state) {
+ case H2_STREAM_ST_OPEN:
+ case H2_STREAM_ST_CLOSED_INPUT:
+ case H2_STREAM_ST_CLOSED_OUTPUT:
+ case H2_STREAM_ST_CLOSED:
+ return !stream->submitted;
+ default:
+ return 0;
+ }
+}
+
+apr_status_t h2_stream_submit_pushes(h2_stream *stream)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_array_header_t *pushes;
+ int i;
+
+ pushes = h2_push_collect_update(stream, stream->request,
+ h2_stream_get_response(stream));
+ if (pushes && !apr_is_empty_array(pushes)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): found %d push candidates",
+ stream->session->id, stream->id, pushes->nelts);
+ for (i = 0; i < pushes->nelts; ++i) {
+ h2_push *push = APR_ARRAY_IDX(pushes, i, h2_push*);
+ h2_stream *s = h2_session_push(stream->session, stream, push);
+ if (!s) {
+ status = APR_ECONNRESET;
+ break;
+ }
+ }
+ }
+ return status;
+}
+
+apr_table_t *h2_stream_get_trailers(h2_stream *stream)
+{
+ return stream->response? stream->response->trailers : NULL;
+}
+
+const h2_priority *h2_stream_get_priority(h2_stream *stream)
+{
+ h2_response *response = h2_stream_get_response(stream);
+
+ if (response && stream->request && stream->request->initiated_on) {
+ const char *ctype = apr_table_get(response->headers, "content-type");
+ if (ctype) {
+ /* FIXME: Not good enough, config needs to come from request->server */
+ return h2_config_get_priority(stream->session->config, ctype);
+ }
+ }
+ return NULL;
+}
+
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
new file mode 100644
index 00000000..f80f8115
--- /dev/null
+++ b/modules/http2/h2_stream.h
@@ -0,0 +1,280 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_stream__
+#define __mod_h2__h2_stream__
+
+#include "h2.h"
+
+/**
+ * An HTTP/2 stream, e.g. a client request+response in HTTP/1.1 terms.
+ *
+ * A stream always belongs to a h2_session, the one managing the
+ * connection to the client. The h2_session writes to the h2_stream,
+ * adding HEADERS and DATA and finally an EOS. When headers are done,
+ * h2_stream is scheduled for handling, which is expected to produce
+ * a h2_response.
+ *
+ * The h2_response gives the HEADER frames to send to the client, followed
+ * by DATA frames read from the h2_stream until EOS is reached.
+ */
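+
+/*
+ * Rough per-stream flow (illustrative only, simplified; the session drives
+ * these calls, the local variable names are made up):
+ *
+ *     stream = h2_stream_open(id, pool, session, 0, NULL);
+ *     h2_stream_add_header(stream, name, nlen, value, vlen);    // repeated
+ *     h2_stream_schedule(stream, eos, push_enabled, cmp, ctx);  // end of headers
+ *     ...   a worker processes the request and produces a response   ...
+ *     h2_stream_set_response(stream, response, output_beam);
+ *     h2_stream_out_prepare(stream, &len, &eos);                // per DATA frame
+ *     h2_stream_read_to(stream, bb, &len, &eos);
+ */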
+
+struct h2_mplx;
+struct h2_priority;
+struct h2_request;
+struct h2_response;
+struct h2_session;
+struct h2_sos;
+struct h2_bucket_beam;
+
+typedef struct h2_stream h2_stream;
+
+struct h2_stream {
+ int id; /* http2 stream id */
+ h2_stream_state_t state; /* http/2 state of this stream */
+ struct h2_session *session; /* the session this stream belongs to */
+
+ apr_pool_t *pool; /* the memory pool for this stream */
+ struct h2_request *request; /* the request made in this stream */
+ struct h2_bucket_beam *input;
+ int request_headers_added; /* number of request headers added */
+
+ struct h2_response *response;
+ struct h2_bucket_beam *output;
+ apr_bucket_brigade *buffer;
+ apr_bucket_brigade *tmp;
+ apr_array_header_t *files; /* apr_file_t* we collected during I/O */
+
+ int rst_error; /* stream error for RST_STREAM */
+ unsigned int aborted : 1; /* was aborted */
+ unsigned int suspended : 1; /* DATA sending has been suspended */
+ unsigned int scheduled : 1; /* stream has been scheduled */
+ unsigned int started : 1; /* stream has started processing */
+ unsigned int submitted : 1; /* response HEADER has been sent */
+
+ apr_off_t input_remaining; /* remaining bytes on input as advertised via content-length */
+ apr_off_t data_frames_sent; /* # of DATA frames sent out for this stream */
+};
+
+
+#define H2_STREAM_RST(s, def) (s->rst_error? s->rst_error : (def))
+
+/**
+ * Create a stream in OPEN state.
+ * @param id the stream identifier
+ * @param pool the memory pool to use for this stream
+ * @param session the session this stream belongs to
+ * @return the newly opened stream
+ */
+h2_stream *h2_stream_open(int id, apr_pool_t *pool, struct h2_session *session,
+ int initiated_on, const struct h2_request *req);
+
+/**
+ * Cleanup any resources still held by the stream, called by last bucket.
+ */
+void h2_stream_eos_destroy(h2_stream *stream);
+
+/**
+ * Destroy memory pool if still owned by the stream.
+ */
+void h2_stream_destroy(h2_stream *stream);
+
+/**
+ * Removes stream from h2_session and destroys it.
+ *
+ * @param stream the stream to cleanup
+ */
+void h2_stream_cleanup(h2_stream *stream);
+
+/**
+ * Detach the memory pool from the stream. Will prevent stream
+ * destruction to take the pool with it.
+ *
+ * @param stream the stream to detach the pool from
+ * @result the detached memory pool or NULL if stream no longer has one
+ */
+apr_pool_t *h2_stream_detach_pool(h2_stream *stream);
+
+
+/**
+ * Initialize stream->request with the given request_rec.
+ *
+ * @param stream stream to write request to
+ * @param r the request with all the meta data
+ */
+apr_status_t h2_stream_set_request(h2_stream *stream, request_rec *r);
+
+/*
+ * Add a HTTP/2 header (including pseudo headers) or trailer
+ * to the given stream, depending on stream state.
+ *
+ * @param stream stream to write the header to
+ * @param name the name of the HTTP/2 header
+ * @param nlen the number of characters in name
+ * @param value the header value
+ * @param vlen the number of characters in value
+ */
+apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+/**
+ * Closes the stream's input.
+ *
+ * @param stream stream to close input of
+ */
+apr_status_t h2_stream_close_input(h2_stream *stream);
+
+/*
+ * Write a chunk of DATA to the stream.
+ *
+ * @param stream stream to write the data to
+ * @param data the beginning of the bytes to write
+ * @param len the number of bytes to write
+ */
+apr_status_t h2_stream_write_data(h2_stream *stream,
+ const char *data, size_t len, int eos);
+
+/**
+ * Reset the stream. Stream write/reads will return errors afterwards.
+ *
+ * @param stream the stream to reset
+ * @param error_code the HTTP/2 error code
+ */
+void h2_stream_rst(h2_stream *stream, int error_code);
+
+/**
+ * Schedule the stream for execution. All header information must be
+ * present. Use the given priority comparison callback to determine
+ * the order of queued streams.
+ *
+ * @param stream the stream to schedule
+ * @param eos != 0 iff no more input will arrive
+ * @param push_enabled != 0 iff server push is allowed for this stream
+ * @param cmp priority comparison
+ * @param ctx context for the comparison
+ */
+apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
+ h2_stream_pri_cmp *cmp, void *ctx);
+
+/**
+ * Determine if stream has been scheduled already.
+ * @param stream the stream to check on
+ * @return != 0 iff stream has been scheduled
+ */
+int h2_stream_is_scheduled(const h2_stream *stream);
+
+struct h2_response *h2_stream_get_response(h2_stream *stream);
+
+/**
+ * Set the response for this stream. Invoked when all meta data for
+ * the stream response has been collected.
+ *
+ * @param stream the stream to set the response for
+ * @param response the response data for the stream
+ * @param output bucket beam carrying output data for the stream. Optional,
+ *        may be incomplete.
+ */
+apr_status_t h2_stream_set_response(h2_stream *stream,
+ struct h2_response *response,
+ struct h2_bucket_beam *output);
+
+/**
+ * Set the HTTP error status as response.
+ */
+apr_status_t h2_stream_set_error(h2_stream *stream, int http_status);
+
+/**
+ * Do a speculative read on the stream output to determine the
+ * amount of data that can be read.
+ *
+ * @param stream the stream to speculatively read from
+ * @param plen (in-/out) number of bytes requested and on return amount of bytes that
+ * may be read without blocking
+ * @param peos (out) != 0 iff end of stream will be reached when reading plen
+ * bytes (out value).
+ * @return APR_SUCCESS if out information was computed successfully.
+ *         APR_EAGAIN if no data is available and the end of stream has not
+ *         been reached yet.
+ */
+apr_status_t h2_stream_out_prepare(h2_stream *stream,
+ apr_off_t *plen, int *peos);
+
+/**
+ * Read a maximum number of bytes into the bucket brigade.
+ *
+ * @param stream the stream to read from
+ * @param bb the brigade to append output to
+ * @param plen (in-/out) max. number of bytes to append and on return actual
+ * number of bytes appended to brigade
+ * @param peos (out) != 0 iff end of stream has been reached while reading
+ * @return APR_SUCCESS if out information was computed successfully.
+ *         APR_EAGAIN if no data is available and the end of stream has not
+ *         been reached yet.
+ */
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos);
+
+/**
+ * Get optional trailers for this stream, may be NULL. Meaningful
+ * results can only be expected when the end of the response body has
+ * been reached.
+ *
+ * @param stream the stream to ask for trailers
+ * @return the trailers, or NULL if there are none
+ */
+apr_table_t *h2_stream_get_trailers(h2_stream *stream);
+
+/**
+ * Set the suspended state of the stream.
+ * @param stream the stream to change state on
+ * @param suspended boolean value if stream is suspended
+ */
+void h2_stream_set_suspended(h2_stream *stream, int suspended);
+
+/**
+ * Check if the stream has been suspended.
+ * @param stream the stream to check
+ * @return != 0 iff stream is suspended.
+ */
+int h2_stream_is_suspended(const h2_stream *stream);
+
+/**
+ * Check if the stream has open input.
+ * @param stream the stream to check
+ * @return != 0 iff stream has open input.
+ */
+int h2_stream_input_is_open(const h2_stream *stream);
+
+/**
+ * Check if the stream has not submitted a response or RST yet.
+ * @param stream the stream to check
+ * @return != 0 iff stream has not submitted a response or RST.
+ */
+int h2_stream_needs_submit(const h2_stream *stream);
+
+/**
+ * Submit any server push promises on this stream and schedule
+ * the tasks connected with them.
+ *
+ * @param stream the stream for which to submit
+ */
+apr_status_t h2_stream_submit_pushes(h2_stream *stream);
+
+/**
+ * Get priority information set for this stream.
+ */
+const struct h2_priority *h2_stream_get_priority(h2_stream *stream);
+
+#endif /* defined(__mod_h2__h2_stream__) */
diff --git a/modules/http2/h2_stream_set.c b/modules/http2/h2_stream_set.c
new file mode 100644
index 00000000..aa0f8c65
--- /dev/null
+++ b/modules/http2/h2_stream_set.c
@@ -0,0 +1,145 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <apr_hash.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_stream.h"
+#include "h2_stream_set.h"
+
+
+struct h2_stream_set {
+ apr_hash_t *hash;
+};
+
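+/* Custom hash for apr_hash_make_custom() below: keys are pointers to the
+ * (int) stream id, and the id value itself serves as the hash. */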
+static unsigned int stream_hash(const char *key, apr_ssize_t *klen)
+{
+ return (unsigned int)(*((int*)key));
+}
+
+h2_stream_set *h2_stream_set_create(apr_pool_t *pool, int max)
+{
+ h2_stream_set *sp = apr_pcalloc(pool, sizeof(h2_stream_set));
+ sp->hash = apr_hash_make_custom(pool, stream_hash);
+
+ return sp;
+}
+
+void h2_stream_set_destroy(h2_stream_set *sp)
+{
+ (void)sp;
+}
+
+h2_stream *h2_stream_set_get(h2_stream_set *sp, int stream_id)
+{
+ return apr_hash_get(sp->hash, &stream_id, sizeof(stream_id));
+}
+
+void h2_stream_set_add(h2_stream_set *sp, h2_stream *stream)
+{
+ apr_hash_set(sp->hash, &stream->id, sizeof(stream->id), stream);
+}
+
+void h2_stream_set_remove(h2_stream_set *sp, int stream_id)
+{
+ apr_hash_set(sp->hash, &stream_id, sizeof(stream_id), NULL);
+}
+
+int h2_stream_set_is_empty(h2_stream_set *sp)
+{
+ return apr_hash_count(sp->hash) == 0;
+}
+
+apr_size_t h2_stream_set_size(h2_stream_set *sp)
+{
+ return apr_hash_count(sp->hash);
+}
+
+typedef struct {
+ h2_stream_set_iter_fn *iter;
+ void *ctx;
+} iter_ctx;
+
+static int hash_iter(void *ctx, const void *key, apr_ssize_t klen,
+ const void *val)
+{
+ iter_ctx *ictx = ctx;
+ return ictx->iter(ictx->ctx, (h2_stream*)val);
+}
+
+void h2_stream_set_iter(h2_stream_set *sp,
+ h2_stream_set_iter_fn *iter, void *ctx)
+{
+ iter_ctx ictx;
+ ictx.iter = iter;
+ ictx.ctx = ctx;
+ apr_hash_do(hash_iter, &ictx, sp->hash);
+}
+
+static int unsubmitted_iter(void *ctx, h2_stream *stream)
+{
+ if (h2_stream_needs_submit(stream)) {
+ *((int *)ctx) = 1;
+ return 0;
+ }
+ return 1;
+}
+
+int h2_stream_set_has_unsubmitted(h2_stream_set *sp)
+{
+ int has_unsubmitted = 0;
+ h2_stream_set_iter(sp, unsubmitted_iter, &has_unsubmitted);
+ return has_unsubmitted;
+}
+
+static int input_open_iter(void *ctx, h2_stream *stream)
+{
+ if (h2_stream_input_is_open(stream)) {
+ *((int *)ctx) = 1;
+ return 0;
+ }
+ return 1;
+}
+
+int h2_stream_set_has_open_input(h2_stream_set *sp)
+{
+ int has_input_open = 0;
+ h2_stream_set_iter(sp, input_open_iter, &has_input_open);
+ return has_input_open;
+}
+
+static int suspended_iter(void *ctx, h2_stream *stream)
+{
+ if (h2_stream_is_suspended(stream)) {
+ *((int *)ctx) = 1;
+ return 0;
+ }
+ return 1;
+}
+
+int h2_stream_set_has_suspended(h2_stream_set *sp)
+{
+ int has_suspended = 0;
+ h2_stream_set_iter(sp, suspended_iter, &has_suspended);
+ return has_suspended;
+}
+
diff --git a/modules/http2/h2_stream_set.h b/modules/http2/h2_stream_set.h
new file mode 100644
index 00000000..d0041c48
--- /dev/null
+++ b/modules/http2/h2_stream_set.h
@@ -0,0 +1,51 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_stream_set__
+#define __mod_h2__h2_stream_set__
+
+/**
+ * A set of h2_stream instances. Allows lookup by stream id
+ * and other criteria.
+ */
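+
+/*
+ * Typical use (illustrative sketch only, not taken from the callers):
+ *
+ *     h2_stream_set *set = h2_stream_set_create(pool, max_streams);
+ *     h2_stream_set_add(set, stream);
+ *     h2_stream *s = h2_stream_set_get(set, stream->id);
+ *     h2_stream_set_remove(set, stream->id);
+ */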
+
+typedef h2_stream *h2_stream_set_match_fn(void *ctx, h2_stream *stream);
+typedef int h2_stream_set_iter_fn(void *ctx, h2_stream *stream);
+
+typedef struct h2_stream_set h2_stream_set;
+
+
+h2_stream_set *h2_stream_set_create(apr_pool_t *pool, int max);
+
+void h2_stream_set_destroy(h2_stream_set *sp);
+
+void h2_stream_set_add(h2_stream_set *sp, h2_stream *stream);
+
+h2_stream *h2_stream_set_get(h2_stream_set *sp, int stream_id);
+
+void h2_stream_set_remove(h2_stream_set *sp, int stream_id);
+
+void h2_stream_set_iter(h2_stream_set *sp,
+ h2_stream_set_iter_fn *iter, void *ctx);
+
+int h2_stream_set_is_empty(h2_stream_set *sp);
+
+apr_size_t h2_stream_set_size(h2_stream_set *sp);
+
+int h2_stream_set_has_unsubmitted(h2_stream_set *sp);
+int h2_stream_set_has_open_input(h2_stream_set *sp);
+int h2_stream_set_has_suspended(h2_stream_set *sp);
+
+#endif /* defined(__mod_h2__h2_stream_set__) */
diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c
new file mode 100644
index 00000000..d1d4a60f
--- /dev/null
+++ b/modules/http2/h2_switch.c
@@ -0,0 +1,185 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_strings.h>
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_conn.h"
+#include "h2_h2.h"
+#include "h2_switch.h"
+
+/*******************************************************************************
+ * Once per lifetime init, retrieve optional functions
+ */
+apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s)
+{
+ (void)pool;
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_switch init");
+
+ return APR_SUCCESS;
+}
+
+static int h2_protocol_propose(conn_rec *c, request_rec *r,
+ server_rec *s,
+ const apr_array_header_t *offers,
+ apr_array_header_t *proposals)
+{
+ int proposed = 0;
+ int is_tls = h2_h2_is_tls(c);
+ const char **protos = is_tls? h2_tls_protos : h2_clear_protos;
+
+ (void)s;
+ if (strcmp(AP_PROTOCOL_HTTP1, ap_get_protocol(c))) {
+        /* We do not know how to switch from anything other than http/1.1.
+         */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03083)
+ "protocol switch: current proto != http/1.1, declined");
+ return DECLINED;
+ }
+
+ if (!h2_is_acceptable_connection(c, 0)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084)
+ "protocol propose: connection requirements not met");
+ return DECLINED;
+ }
+
+ if (r) {
+ /* So far, this indicates an HTTP/1 Upgrade header initiated
+ * protocol switch. For that, the HTTP2-Settings header needs
+ * to be present and valid for the connection.
+ */
+ const char *p;
+
+ if (!h2_allows_h2_upgrade(c)) {
+ return DECLINED;
+ }
+
+ p = apr_table_get(r->headers_in, "HTTP2-Settings");
+ if (!p) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03085)
+ "upgrade without HTTP2-Settings declined");
+ return DECLINED;
+ }
+
+ p = apr_table_get(r->headers_in, "Connection");
+ if (!ap_find_token(r->pool, p, "http2-settings")) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03086)
+ "upgrade without HTTP2-Settings declined");
+ return DECLINED;
+ }
+
+ /* We also allow switching only for requests that have no body.
+ */
+ p = apr_table_get(r->headers_in, "Content-Length");
+ if (p && strcmp(p, "0")) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03087)
+ "upgrade with content-length: %s, declined", p);
+ return DECLINED;
+ }
+ }
+
+ while (*protos) {
+ /* Add all protocols we know (tls or clear) and that
+ * are part of the offerings (if there have been any).
+ */
+ if (!offers || ap_array_str_contains(offers, *protos)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "proposing protocol '%s'", *protos);
+ APR_ARRAY_PUSH(proposals, const char*) = *protos;
+ proposed = 1;
+ }
+ ++protos;
+ }
+ return proposed? DECLINED : OK;
+}
+
+static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
+ const char *protocol)
+{
+ int found = 0;
+ const char **protos = h2_h2_is_tls(c)? h2_tls_protos : h2_clear_protos;
+ const char **p = protos;
+
+ (void)s;
+ while (*p) {
+ if (!strcmp(*p, protocol)) {
+ found = 1;
+ break;
+ }
+ p++;
+ }
+
+ if (found) {
+ h2_ctx *ctx = h2_ctx_get(c, 1);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "switching protocol to '%s'", protocol);
+ h2_ctx_protocol_set(ctx, protocol);
+ h2_ctx_server_set(ctx, s);
+
+ if (r != NULL) {
+ apr_status_t status;
+ /* Switching in the middle of a request means that
+ * we have to send out the response to this one in h2
+ * format. So we need to take over the connection
+ * right away.
+ */
+ ap_remove_input_filter_byhandle(r->input_filters, "http_in");
+ ap_remove_input_filter_byhandle(r->input_filters, "reqtimeout");
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+
+ /* Ok, start an h2_conn on this one. */
+ h2_ctx_server_set(ctx, r->server);
+ status = h2_conn_setup(ctx, r->connection, r);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088)
+ "session setup");
+ return status;
+ }
+
+ h2_conn_run(ctx, c);
+ return DONE;
+ }
+ return DONE;
+ }
+
+ return DECLINED;
+}
+
+static const char *h2_protocol_get(const conn_rec *c)
+{
+ return h2_ctx_protocol_get(c);
+}
+
+void h2_switch_register_hooks(void)
+{
+ ap_hook_protocol_propose(h2_protocol_propose, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_protocol_switch(h2_protocol_switch, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_protocol_get(h2_protocol_get, NULL, NULL, APR_HOOK_MIDDLE);
+}
diff --git a/modules/http2/h2_switch.h b/modules/http2/h2_switch.h
new file mode 100644
index 00000000..3d9c628c
--- /dev/null
+++ b/modules/http2/h2_switch.h
@@ -0,0 +1,29 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_switch__
+#define __mod_h2__h2_switch__
+
+/*
+ * One time, post config initialization.
+ */
+apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s);
+
+/* Register apache hooks for protocol switching
+ */
+void h2_switch_register_hooks(void);
+
+
+#endif /* defined(__mod_h2__h2_switch__) */
diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
new file mode 100644
index 00000000..381d0b1c
--- /dev/null
+++ b/modules/http2/h2_task.c
@@ -0,0 +1,829 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <apr_atomic.h>
+#include <apr_thread_cond.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <http_vhost.h>
+#include <util_filter.h>
+#include <ap_mpm.h>
+#include <mod_core.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_conn.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_from_h1.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_request.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_task.h"
+#include "h2_worker.h"
+#include "h2_util.h"
+
+/*******************************************************************************
+ * task input handling
+ ******************************************************************************/
+
+static int input_ser_header(void *ctx, const char *name, const char *value)
+{
+ h2_task *task = ctx;
+ apr_brigade_printf(task->input.bb, NULL, NULL, "%s: %s\r\n", name, value);
+ return 1;
+}
+
+static void make_chunk(h2_task *task, apr_bucket_brigade *bb,
+ apr_bucket *first, apr_uint64_t chunk_len,
+ apr_bucket *tail)
+{
+ /* Surround the buckets [first, tail[ with new buckets carrying the
+ * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
+ * to the end of the brigade. */
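+    /* Illustration only: a 26 byte chunk [data(26)] becomes
+     *   [heap("1a\r\n")][data(26)][heap("\r\n")]
+     * i.e. the hex length line before the data and a CRLF after it. */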
+ char buffer[128];
+ apr_bucket *c;
+ int len;
+
+ len = apr_snprintf(buffer, H2_ALEN(buffer),
+ "%"APR_UINT64_T_HEX_FMT"\r\n", chunk_len);
+ c = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(first, c);
+ c = apr_bucket_heap_create("\r\n", 2, NULL, bb->bucket_alloc);
+ if (tail) {
+ APR_BUCKET_INSERT_BEFORE(tail, c);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(bb, c);
+ }
+}
+
+static apr_status_t input_handle_eos(h2_task *task, request_rec *r,
+ apr_bucket *b)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_brigade *bb = task->input.bb;
+ apr_table_t *t = task->request? task->request->trailers : NULL;
+
+ if (task->input.chunked) {
+ task->input.tmp = apr_brigade_split_ex(bb, b, task->input.tmp);
+ if (t && !apr_is_empty_table(t)) {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
+ apr_table_do(input_ser_header, task, t, NULL);
+ status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+ }
+ else {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ }
+ APR_BRIGADE_CONCAT(bb, task->input.tmp);
+ }
+ else if (r && t && !apr_is_empty_table(t)){
+ /* trailers passed in directly. */
+ apr_table_overlap(r->trailers_in, t, APR_OVERLAP_TABLES_SET);
+ }
+ task->input.eos_written = 1;
+ return status;
+}
+
+static apr_status_t input_append_eos(h2_task *task, request_rec *r)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_brigade *bb = task->input.bb;
+ apr_table_t *t = task->request? task->request->trailers : NULL;
+
+ if (task->input.chunked) {
+ if (t && !apr_is_empty_table(t)) {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
+ apr_table_do(input_ser_header, task, t, NULL);
+ status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+ }
+ else {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ }
+ }
+ else if (r && t && !apr_is_empty_table(t)){
+ /* trailers passed in directly. */
+ apr_table_overlap(r->trailers_in, t, APR_OVERLAP_TABLES_SET);
+ }
+ APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(bb->bucket_alloc));
+ task->input.eos_written = 1;
+ return status;
+}
+
+static apr_status_t input_read(h2_task *task, ap_filter_t* f,
+ apr_bucket_brigade* bb, ap_input_mode_t mode,
+ apr_read_type_e block, apr_off_t readbytes)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b, *next, *first_data;
+ apr_off_t bblen = 0;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): read, mode=%d, block=%d, readbytes=%ld",
+ task->id, mode, block, (long)readbytes);
+
+ if (mode == AP_MODE_INIT) {
+ return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
+ }
+
+ if (f->c->aborted || !task->request) {
+ return APR_ECONNABORTED;
+ }
+
+ if (!task->input.bb) {
+ if (!task->input.eos_written) {
+ input_append_eos(task, f->r);
+ return APR_SUCCESS;
+ }
+ return APR_EOF;
+ }
+
+    /* Clean up the brigade: remove those nasty 0-length non-meta buckets
+     * that apr_brigade_split_line() sometimes produces. */
+ for (b = APR_BRIGADE_FIRST(task->input.bb);
+ b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (b->length == 0 && !APR_BUCKET_IS_METADATA(b)) {
+ apr_bucket_delete(b);
+ }
+ }
+
+ while (APR_BRIGADE_EMPTY(task->input.bb) && !task->input.eos) {
+ /* Get more input data for our request. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task(%s): get more data from mplx, block=%d, "
+ "readbytes=%ld, queued=%ld",
+ task->id, block, (long)readbytes, (long)bblen);
+
+ /* Override the block mode we get called with depending on the input's
+ * setting. */
+ if (task->input.beam) {
+ status = h2_beam_receive(task->input.beam, task->input.bb, block,
+ H2MIN(readbytes, 32*1024));
+ }
+ else {
+ status = APR_EOF;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_task(%s): read returned", task->id);
+ if (APR_STATUS_IS_EAGAIN(status)
+ && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
+ /* chunked input handling does not seem to like it if we
+ * return with APR_EAGAIN from a GETLINE read...
+ * upload 100k test on test-ser.example.org hangs */
+ status = APR_SUCCESS;
+ }
+ else if (APR_STATUS_IS_EOF(status)) {
+ task->input.eos = 1;
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ /* Inspect the buckets received, detect EOS and apply
+ * chunked encoding if necessary */
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "input.beam recv raw", task->input.bb);
+ first_data = NULL;
+ bblen = 0;
+ for (b = APR_BRIGADE_FIRST(task->input.bb);
+ b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (first_data && task->input.chunked) {
+ make_chunk(task, task->input.bb, first_data, bblen, b);
+ first_data = NULL;
+ bblen = 0;
+ }
+ if (APR_BUCKET_IS_EOS(b)) {
+ task->input.eos = 1;
+ input_handle_eos(task, f->r, b);
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "input.bb after handle eos",
+ task->input.bb);
+ }
+ }
+ else if (b->length == 0) {
+ apr_bucket_delete(b);
+ }
+ else {
+ if (!first_data) {
+ first_data = b;
+ }
+ bblen += b->length;
+ }
+ }
+ if (first_data && task->input.chunked) {
+ make_chunk(task, task->input.bb, first_data, bblen, NULL);
+ }
+
+ if (h2_task_logio_add_bytes_in) {
+ h2_task_logio_add_bytes_in(f->c, bblen);
+ }
+ }
+
+ if (task->input.eos) {
+ if (!task->input.eos_written) {
+ input_append_eos(task, f->r);
+ }
+ if (APR_BRIGADE_EMPTY(task->input.bb)) {
+ return APR_EOF;
+ }
+ }
+
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "task_input.bb", task->input.bb);
+
+ if (APR_BRIGADE_EMPTY(task->input.bb)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): no data", task->id);
+ return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, task->input.bb);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, task->input.bb, readbytes);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, task->input.bb, readbytes);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+        /* we are reading a single LF line, e.g. the HTTP headers.
+         * this has the nasty side effect of splitting the bucket, even
+         * though it ends with CRLF, creating a 0-length bucket */
+ status = apr_brigade_split_line(bb, task->input.bb, block,
+ HUGE_STRING_LEN);
+ if (APLOGctrace1(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task(%s): getline: %s",
+ task->id, buffer);
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(02942)
+ "h2_task, unsupported READ mode %d", mode);
+ status = APR_ENOTIMPL;
+ }
+
+ if (APLOGctrace1(f->c)) {
+ apr_brigade_length(bb, 0, &bblen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task(%s): return %ld data bytes",
+ task->id, (long)bblen);
+ }
+ return status;
+}
+
+/*******************************************************************************
+ * task output handling
+ ******************************************************************************/
+
+static apr_status_t open_response(h2_task *task)
+{
+ h2_response *response;
+ response = h2_from_h1_get_response(task->output.from_h1);
+ if (!response) {
+ /* This happens currently when ap_die(status, r) is invoked
+ * by a read request filter. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03204)
+ "h2_task(%s): write without response for %s %s %s",
+ task->id,
+ task->request->method,
+ task->request->authority,
+ task->request->path);
+ task->c->aborted = 1;
+ return APR_ECONNABORTED;
+ }
+
+ if (h2_task_logio_add_bytes_out) {
+ /* count headers as if we'd do a HTTP/1.1 serialization */
+ task->output.written = h2_util_table_bytes(response->headers, 3)+1;
+ h2_task_logio_add_bytes_out(task->c, task->output.written);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03348)
+ "h2_task(%s): open response to %s %s %s",
+ task->id, task->request->method,
+ task->request->authority,
+ task->request->path);
+ return h2_mplx_out_open(task->mplx, task->stream_id, response);
+}
+
+static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb)
+{
+ apr_off_t written, left;
+ apr_status_t status;
+
+ apr_brigade_length(bb, 0, &written);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): write response body (%ld bytes)",
+ task->id, (long)written);
+
+ status = h2_beam_send(task->output.beam, bb,
+ task->blocking? APR_BLOCK_READ
+ : APR_NONBLOCK_READ);
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ apr_brigade_length(bb, 0, &left);
+ written -= left;
+ status = APR_SUCCESS;
+ }
+ if (status == APR_SUCCESS) {
+ task->output.written += written;
+ if (h2_task_logio_add_bytes_out) {
+ h2_task_logio_add_bytes_out(task->c, written);
+ }
+ }
+ return status;
+}
+
+/* Bring the data from the brigade (which represents the result of the
+ * request_rec out filter chain) into the h2_mplx for further sending
+ * on the master connection.
+ */
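+/* Rough sketch of the data path (orientation only):
+ *   slave request_rec output filters -> output_write()
+ *     -> send_out() -> h2_beam_send(task->output.beam, ...)
+ *     -> read on the master connection by the stream's h2_session/h2_mplx.
+ */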
+static apr_status_t output_write(h2_task *task, ap_filter_t* f,
+ apr_bucket_brigade* bb)
+{
+ apr_bucket *b;
+ apr_status_t status = APR_SUCCESS;
+ int flush = 0;
+
+ if (APR_BRIGADE_EMPTY(bb)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): empty write", task->id);
+ return APR_SUCCESS;
+ }
+
+ if (task->frozen) {
+ h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
+ "frozen task output write, ignored", bb);
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (AP_BUCKET_IS_EOR(b)) {
+ APR_BUCKET_REMOVE(b);
+ task->eor = b;
+ }
+ else {
+ apr_bucket_delete(b);
+ }
+ }
+ return APR_SUCCESS;
+ }
+
+ if (!task->output.beam) {
+ h2_beam_create(&task->output.beam, task->pool,
+ task->stream_id, "output", 0);
+ }
+
+ /* Attempt to write saved brigade first */
+ if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) {
+ status = send_out(task, task->output.bb);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ /* If there is nothing saved (anymore), try to write the brigade passed */
+ if ((!task->output.bb || APR_BRIGADE_EMPTY(task->output.bb))
+ && !APR_BRIGADE_EMPTY(bb)) {
+ /* check if we have a flush before the end-of-request */
+ if (!task->output.response_open) {
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b)) {
+ if (AP_BUCKET_IS_EOR(b)) {
+ break;
+ }
+ else if (APR_BUCKET_IS_FLUSH(b)) {
+ flush = 1;
+ }
+ }
+ }
+
+ status = send_out(task, bb);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ /* If the passed brigade is not empty, save it before return */
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03405)
+ "h2_task(%s): could not write all, saving brigade",
+ task->id);
+ if (!task->output.bb) {
+ task->output.bb = apr_brigade_create(task->pool,
+ task->c->bucket_alloc);
+ }
+ return ap_save_brigade(f, &task->output.bb, &bb, task->pool);
+ }
+
+ if (!task->output.response_open
+ && (flush || h2_beam_get_mem_used(task->output.beam) > (32*1024))) {
+ /* if we have enough buffered or we got a flush bucket, open
+ * the response now. */
+ status = open_response(task);
+ task->output.response_open = 1;
+ }
+
+ return status;
+}
+
+static apr_status_t output_finish(h2_task *task)
+{
+ apr_status_t status = APR_SUCCESS;
+
+ if (!task->output.response_open) {
+ status = open_response(task);
+ task->output.response_open = 1;
+ }
+ return status;
+}
+
+/*******************************************************************************
+ * task slave connection filters
+ ******************************************************************************/
+
+static apr_status_t h2_filter_stream_input(ap_filter_t* filter,
+ apr_bucket_brigade* brigade,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_task *task = h2_ctx_cget_task(filter->c);
+ AP_DEBUG_ASSERT(task);
+ return input_read(task, filter, brigade, mode, block, readbytes);
+}
+
+static apr_status_t h2_filter_stream_output(ap_filter_t* filter,
+ apr_bucket_brigade* brigade)
+{
+ h2_task *task = h2_ctx_cget_task(filter->c);
+ AP_DEBUG_ASSERT(task);
+ return output_write(task, filter, brigade);
+}
+
+static apr_status_t h2_filter_read_response(ap_filter_t* filter,
+ apr_bucket_brigade* bb)
+{
+ h2_task *task = h2_ctx_cget_task(filter->c);
+ AP_DEBUG_ASSERT(task);
+ if (!task->output.from_h1) {
+ return APR_ECONNABORTED;
+ }
+ return h2_from_h1_read_response(task->output.from_h1, filter, bb);
+}
+
+/*******************************************************************************
+ * task things
+ ******************************************************************************/
+
+void h2_task_set_response(h2_task *task, h2_response *response)
+{
+ AP_DEBUG_ASSERT(response);
+ AP_DEBUG_ASSERT(!task->response);
+    /* we used to clone the response into our own pool. But
+     * we have much tighter control over the EOR bucket nowadays,
+     * so just use the instance given */
+ task->response = response;
+ if (response->rst_error) {
+ h2_task_rst(task, response->rst_error);
+ }
+}
+
+
+int h2_task_can_redo(h2_task *task) {
+ if (task->submitted
+ || (task->input.beam && h2_beam_was_received(task->input.beam))
+ || !task->request) {
+ /* cannot repeat that. */
+ return 0;
+ }
+ return (!strcmp("GET", task->request->method)
+ || !strcmp("HEAD", task->request->method)
+ || !strcmp("OPTIONS", task->request->method));
+}
+
+void h2_task_redo(h2_task *task)
+{
+ task->response = NULL;
+ task->rst_error = 0;
+}
+
+void h2_task_rst(h2_task *task, int error)
+{
+ task->rst_error = error;
+ if (task->input.beam) {
+ h2_beam_abort(task->input.beam);
+ }
+ if (task->output.beam) {
+ h2_beam_abort(task->output.beam);
+ }
+ if (task->c) {
+ task->c->aborted = 1;
+ }
+}
+
+/*******************************************************************************
+ * Register various hooks
+ */
+static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
+static int h2_task_pre_conn(conn_rec* c, void *arg);
+static int h2_task_process_conn(conn_rec* c);
+
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
+
+void h2_task_register_hooks(void)
+{
+ /* This hook runs on new connections before mod_ssl has a say.
+ * Its purpose is to prevent mod_ssl from touching our pseudo-connections
+ * for streams.
+ */
+ ap_hook_pre_connection(h2_task_pre_conn,
+ NULL, mod_ssl, APR_HOOK_FIRST);
+ /* When the connection processing actually starts, we might
+ * take over, if the connection is for a task.
+ */
+ ap_hook_process_connection(h2_task_process_conn,
+ NULL, NULL, APR_HOOK_FIRST);
+
+ ap_register_output_filter("H2_RESPONSE", h2_response_output_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_input_filter("H2_TO_H1", h2_filter_stream_input,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H1_TO_H2", h2_filter_stream_output,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H1_TO_H2_RESP", h2_filter_read_response,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_TRAILERS", h2_response_trailers_filter,
+ NULL, AP_FTYPE_PROTOCOL);
+}
+
+/* post config init */
+apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s)
+{
+ h2_task_logio_add_bytes_in = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in);
+ h2_task_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out);
+
+ return APR_SUCCESS;
+}
+
+static int h2_task_pre_conn(conn_rec* c, void *arg)
+{
+ h2_ctx *ctx;
+
+ if (!c->master) {
+ return OK;
+ }
+
+ ctx = h2_ctx_get(c, 0);
+ (void)arg;
+ if (h2_ctx_is_task(ctx)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_h2, pre_connection, found stream task");
+
+ /* Add our own, network level in- and output filters.
+ */
+ ap_add_input_filter("H2_TO_H1", NULL, NULL, c);
+ ap_add_output_filter("H1_TO_H2", NULL, NULL, c);
+ }
+ return OK;
+}
+
+h2_task *h2_task_create(conn_rec *c, const h2_request *req,
+ h2_bucket_beam *input, h2_mplx *mplx)
+{
+ apr_pool_t *pool;
+ h2_task *task;
+
+ apr_pool_create(&pool, c->pool);
+ task = apr_pcalloc(pool, sizeof(h2_task));
+ if (task == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, c,
+ APLOGNO(02941) "h2_task(%ld-%d): create stream task",
+ c->id, req->id);
+ return NULL;
+ }
+
+ task->id = apr_psprintf(pool, "%ld-%d", c->id, req->id);
+ task->stream_id = req->id;
+ task->c = c;
+ task->mplx = mplx;
+ task->c->keepalives = mplx->c->keepalives;
+ task->pool = pool;
+ task->request = req;
+ task->ser_headers = req->serialize;
+ task->blocking = 1;
+ task->input.beam = input;
+
+ apr_thread_cond_create(&task->cond, pool);
+
+ h2_ctx_create_for(c, task);
+ return task;
+}
+
+void h2_task_destroy(h2_task *task)
+{
+ if (task->output.beam) {
+ h2_beam_destroy(task->output.beam);
+ task->output.beam = NULL;
+ }
+ if (task->eor) {
+ apr_bucket_destroy(task->eor);
+ }
+ if (task->pool) {
+ apr_pool_destroy(task->pool);
+ }
+}
+
+void h2_task_set_io_blocking(h2_task *task, int blocking)
+{
+ task->blocking = blocking;
+}
+
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread)
+{
+ AP_DEBUG_ASSERT(task);
+
+ task->input.block = APR_BLOCK_READ;
+ task->input.chunked = task->request->chunked;
+ task->input.eos = !task->request->body;
+ if (task->input.eos && !task->input.chunked && !task->ser_headers) {
+ /* We do not serialize/chunk and have eos already, no need to
+ * create a bucket brigade. */
+ task->input.bb = NULL;
+ task->input.eos_written = 1;
+ }
+ else {
+ task->input.bb = apr_brigade_create(task->pool, task->c->bucket_alloc);
+ if (task->ser_headers) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): serialize request %s %s",
+ task->id, task->request->method, task->request->path);
+ apr_brigade_printf(task->input.bb, NULL,
+ NULL, "%s %s HTTP/1.1\r\n",
+ task->request->method, task->request->path);
+ apr_table_do(input_ser_header, task, task->request->headers, NULL);
+ apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
+ }
+ if (task->input.eos) {
+ input_append_eos(task, NULL);
+ }
+ }
+
+ task->output.from_h1 = h2_from_h1_create(task->stream_id, task->pool);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): process connection", task->id);
+ task->c->current_thread = thread;
+ ap_run_process_connection(task->c);
+
+ if (task->frozen) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): process_conn returned frozen task",
+ task->id);
+ /* cleanup delayed */
+ return APR_EAGAIN;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_task(%s): processing done", task->id);
+ return output_finish(task);
+ }
+}
+
+static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
+{
+ const h2_request *req = task->request;
+ conn_state_t *cs = c->cs;
+ request_rec *r;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): create request_rec", task->id);
+ r = h2_request_create_rec(req, c);
+ if (r && (r->status == HTTP_OK)) {
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+
+ if (cs) {
+ cs->state = CONN_STATE_HANDLER;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): start process_request", task->id);
+ ap_process_request(r);
+ if (task->frozen) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): process_request frozen", task->id);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): process_request done", task->id);
+
+ /* After the call to ap_process_request, the
+ * request pool will have been deleted. We set
+ * r=NULL here to ensure that any dereference
+ * of r that might be added later in this function
+ * will result in a segfault immediately instead
+ * of nondeterministic failures later.
+ */
+ if (cs)
+ cs->state = CONN_STATE_WRITE_COMPLETION;
+ r = NULL;
+ }
+ else if (!r) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): create request_rec failed, r=NULL", task->id);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): create request_rec failed, r->status=%d",
+ task->id, r->status);
+ }
+
+ return APR_SUCCESS;
+}
+
+static int h2_task_process_conn(conn_rec* c)
+{
+ h2_ctx *ctx;
+
+ if (!c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_ctx_get(c, 0);
+ if (h2_ctx_is_task(ctx)) {
+ if (!ctx->task->ser_headers) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, processing request directly");
+ h2_task_process_request(ctx->task, c);
+ return DONE;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s), serialized handling", ctx->task->id);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "slave_conn(%ld): has no task", c->id);
+ }
+ return DECLINED;
+}
+
+apr_status_t h2_task_freeze(h2_task *task)
+{
+ if (!task->frozen) {
+ task->frozen = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406)
+ "h2_task(%s), frozen", task->id);
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_task_thaw(h2_task *task)
+{
+ if (task->frozen) {
+ task->frozen = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407)
+ "h2_task(%s), thawed", task->id);
+ }
+ task->detached = 1;
+ return APR_SUCCESS;
+}
+
+int h2_task_is_detached(h2_task *task)
+{
+ return task->detached;
+}
diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
new file mode 100644
index 00000000..010005a3
--- /dev/null
+++ b/modules/http2/h2_task.h
@@ -0,0 +1,132 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_task__
+#define __mod_h2__h2_task__
+
+#include <http_core.h>
+
+/**
+ * An h2_task fakes a HTTP/1.1 request from the data in a HTTP/2 stream
+ * (HEADERS+CONTINUATION+DATA frames) the module receives.
+ *
+ * In order to answer a HTTP/2 stream, we want all Apache httpd infrastructure
+ * to be involved as usual, as if this stream were a separate HTTP/1.1
+ * request. The basic trickery to do so was derived from google's mod_spdy
+ * source. Basically, we fake a new conn_rec object, even with its own
+ * socket, and give it to ap_process_connection().
+ *
+ * Since h2_task instances are executed in separate threads, they may have
+ * different lifetimes than our h2_stream or h2_session instances. Basically,
+ * we would like to be as standalone as possible.
+ *
+ * Finally, to keep certain connection level filters, such as ourselves and
+ * especially mod_ssl ones, from messing with our data, we need a filter
+ * of our own to disable those.
+ */
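+/* A simplified lifecycle sketch (the actual call sites live in the
+ * module's mplx/worker code):
+ *
+ *   h2_task *task = h2_task_create(slave_c, req, input_beam, mplx);
+ *   status = h2_task_do(task, worker_thread);   (runs ap_run_process_connection())
+ *   ...
+ *   h2_task_destroy(task);
+ */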
+
+struct apr_thread_cond_t;
+struct h2_bucket_beam;
+struct h2_conn;
+struct h2_mplx;
+struct h2_task;
+struct h2_req_engine;
+struct h2_request;
+struct h2_response;
+struct h2_worker;
+
+typedef struct h2_task h2_task;
+
+struct h2_task {
+ const char *id;
+ int stream_id;
+ conn_rec *c;
+ apr_pool_t *pool;
+
+ const struct h2_request *request;
+ struct h2_response *response;
+
+ struct {
+ struct h2_bucket_beam *beam;
+ apr_bucket_brigade *bb;
+ apr_bucket_brigade *tmp;
+ apr_read_type_e block;
+ unsigned int chunked : 1;
+ unsigned int eos : 1;
+ unsigned int eos_written : 1;
+ } input;
+ struct {
+ struct h2_bucket_beam *beam;
+ struct h2_from_h1 *from_h1;
+ unsigned int response_open : 1;
+ apr_off_t written;
+ apr_bucket_brigade *bb;
+ } output;
+
+ struct h2_mplx *mplx;
+ struct apr_thread_cond_t *cond;
+
+ int rst_error; /* h2 related stream abort error */
+ unsigned int filters_set : 1;
+ unsigned int ser_headers : 1;
+ unsigned int frozen : 1;
+ unsigned int blocking : 1;
+ unsigned int detached : 1;
+ unsigned int submitted : 1; /* response has been submitted to client */
+ unsigned int worker_started : 1; /* h2_worker started processing for this io */
+ unsigned int worker_done : 1; /* h2_worker finished for this io */
+
+ apr_time_t started_at; /* when processing started */
+ apr_time_t done_at; /* when processing was done */
+ apr_bucket *eor;
+
+ struct h2_req_engine *engine; /* engine hosted by this task */
+ struct h2_req_engine *assigned; /* engine that task has been assigned to */
+ request_rec *r; /* request being processed in this task */
+};
+
+h2_task *h2_task_create(conn_rec *c, const struct h2_request *req,
+ struct h2_bucket_beam *input, struct h2_mplx *mplx);
+
+void h2_task_destroy(h2_task *task);
+
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread);
+
+void h2_task_set_response(h2_task *task, struct h2_response *response);
+
+void h2_task_redo(h2_task *task);
+int h2_task_can_redo(h2_task *task);
+
+/**
+ * Reset the task with the given error code, resets all input/output.
+ */
+void h2_task_rst(h2_task *task, int error);
+
+void h2_task_register_hooks(void);
+/*
+ * One time, post config initialization.
+ */
+apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s);
+
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
+
+apr_status_t h2_task_freeze(h2_task *task);
+apr_status_t h2_task_thaw(h2_task *task);
+int h2_task_is_detached(h2_task *task);
+
+void h2_task_set_io_blocking(h2_task *task, int blocking);
+
+#endif /* defined(__mod_h2__h2_task__) */
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
new file mode 100644
index 00000000..8d1060e5
--- /dev/null
+++ b/modules/http2/h2_util.c
@@ -0,0 +1,1455 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <http_request.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2.h"
+#include "h2_util.h"
+
+/* h2_log2(n) returns the integer (floor) log2 of n; exact iff n is a power of 2 */
+unsigned char h2_log2(apr_uint32_t n)
+{
+ int lz = 0;
+ if (!n) {
+ return 0;
+ }
+ if (!(n & 0xffff0000u)) {
+ lz += 16;
+ n = (n << 16);
+ }
+ if (!(n & 0xff000000u)) {
+ lz += 8;
+ n = (n << 8);
+ }
+ if (!(n & 0xf0000000u)) {
+ lz += 4;
+ n = (n << 4);
+ }
+ if (!(n & 0xc0000000u)) {
+ lz += 2;
+ n = (n << 2);
+ }
+ if (!(n & 0x80000000u)) {
+ lz += 1;
+ }
+
+ return 31 - lz;
+}
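+/* Examples: h2_log2(1) == 0, h2_log2(2) == 1, h2_log2(16) == 4,
+ * h2_log2(17) == 4; h2_log2(0) returns 0 by convention. */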
+
+size_t h2_util_hex_dump(char *buffer, size_t maxlen,
+ const char *data, size_t datalen)
+{
+ size_t offset = 0;
+ size_t maxoffset = (maxlen-4);
+ size_t i;
+ for (i = 0; i < datalen && offset < maxoffset; ++i) {
+ const char *sep = (i && i % 16 == 0)? "\n" : " ";
+ int n = apr_snprintf(buffer+offset, maxoffset-offset,
+ "%2x%s", ((unsigned int)data[i]&0xff), sep);
+ offset += n;
+ }
+ strcpy(buffer+offset, (i<datalen)? "..." : "");
+ return strlen(buffer);
+}
+
+size_t h2_util_header_print(char *buffer, size_t maxlen,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen)
+{
+ size_t offset = 0;
+ size_t i;
+ for (i = 0; i < namelen && offset < maxlen; ++i, ++offset) {
+ buffer[offset] = name[i];
+ }
+ for (i = 0; i < 2 && offset < maxlen; ++i, ++offset) {
+ buffer[offset] = ": "[i];
+ }
+ for (i = 0; i < valuelen && offset < maxlen; ++i, ++offset) {
+ buffer[offset] = value[i];
+ }
+ buffer[offset] = '\0';
+ return offset;
+}
+
+
+void h2_util_camel_case_header(char *s, size_t len)
+{
+ size_t start = 1;
+ size_t i;
+ for (i = 0; i < len; ++i) {
+ if (start) {
+ if (s[i] >= 'a' && s[i] <= 'z') {
+ s[i] -= 'a' - 'A';
+ }
+
+ start = 0;
+ }
+ else if (s[i] == '-') {
+ start = 1;
+ }
+ }
+}
+
+static const int BASE64URL_UINT6[] = {
+/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 1 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, /* 2 */
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, /* 3 */
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, 63, /* 5 */
+ -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1, /* 7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 8 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 9 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* a */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* b */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* c */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* d */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* e */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 /* f */
+};
+static const char BASE64URL_CHARS[] = {
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', /* 0 - 9 */
+ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', /* 10 - 19 */
+ 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', /* 20 - 29 */
+ 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', /* 30 - 39 */
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', /* 40 - 49 */
+ 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', /* 50 - 59 */
+ '8', '9', '-', '_', ' ', ' ', ' ', ' ', ' ', ' ', /* 60 - 69 */
+};
+
+apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded,
+ apr_pool_t *pool)
+{
+ const unsigned char *e = (const unsigned char *)encoded;
+ const unsigned char *p = e;
+ unsigned char *d;
+ int n;
+ apr_size_t len, mlen, remain, i;
+
+ while (*p && BASE64URL_UINT6[ *p ] != -1) {
+ ++p;
+ }
+ len = p - e;
+ mlen = (len/4)*4;
+ *decoded = apr_pcalloc(pool, len+1);
+
+ i = 0;
+ d = (unsigned char*)*decoded;
+ for (; i < mlen; i += 4) {
+ n = ((BASE64URL_UINT6[ e[i+0] ] << 18) +
+ (BASE64URL_UINT6[ e[i+1] ] << 12) +
+ (BASE64URL_UINT6[ e[i+2] ] << 6) +
+ (BASE64URL_UINT6[ e[i+3] ]));
+ *d++ = n >> 16;
+ *d++ = n >> 8 & 0xffu;
+ *d++ = n & 0xffu;
+ }
+ remain = len - mlen;
+ switch (remain) {
+ case 2:
+ n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
+ (BASE64URL_UINT6[ e[mlen+1] ] << 12));
+ *d++ = n >> 16;
+ break;
+ case 3:
+ n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
+ (BASE64URL_UINT6[ e[mlen+1] ] << 12) +
+ (BASE64URL_UINT6[ e[mlen+2] ] << 6));
+ *d++ = n >> 16;
+ *d++ = n >> 8 & 0xffu;
+ break;
+ default: /* do nothing */
+ break;
+ }
+ return mlen/4*3 + remain;
+}
+
+const char *h2_util_base64url_encode(const char *data,
+ apr_size_t len, apr_pool_t *pool)
+{
+ apr_size_t mlen = ((len+2)/3)*3;
+ apr_size_t slen = (mlen/3)*4;
+ apr_size_t i;
+ const unsigned char *udata = (const unsigned char*)data;
+ char *enc, *p = apr_pcalloc(pool, slen+1); /* 0 terminated */
+
+ enc = p;
+ for (i = 0; i < mlen; i+= 3) {
+ *p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ];
+ *p++ = BASE64URL_CHARS[ ((udata[i] << 4) +
+ ((i+1 < len)? (udata[i+1] >> 4) : 0)) & 0x3fu ];
+        /* guard the remainder: do not read past the end of the input
+         * when len is not a multiple of 3 */
+        if (i+1 < len) {
+            *p++ = BASE64URL_CHARS[ ((udata[i+1] << 2) +
+                                     ((i+2 < len)? (udata[i+2] >> 6) : 0)) & 0x3fu ];
+        }
+        if (i+2 < len) {
+            *p++ = BASE64URL_CHARS[ udata[i+2] & 0x3fu ];
+        }
+ }
+
+ return enc;
+}
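+/* Examples (unpadded base64url, cf. RFC 4648, section 5):
+ *   h2_util_base64url_encode("Man", 3, p) -> "TWFu"
+ *   h2_util_base64url_encode("Ma",  2, p) -> "TWE"
+ *   h2_util_base64url_decode(&dec, "TWFu", p) -> 3, dec holds "Man"
+ */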
+
+int h2_util_contains_token(apr_pool_t *pool, const char *s, const char *token)
+{
+ char *c;
+ if (s) {
+ if (!apr_strnatcasecmp(s, token)) { /* the simple life */
+ return 1;
+ }
+
+ for (c = ap_get_token(pool, &s, 0); c && *c;
+ c = *s? ap_get_token(pool, &s, 0) : NULL) {
+ if (!apr_strnatcasecmp(c, token)) { /* seeing the token? */
+ return 1;
+ }
+ while (*s++ == ';') { /* skip parameters */
+ ap_get_token(pool, &s, 0);
+ }
+ if (*s++ != ',') { /* need comma separation */
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+const char *h2_util_first_token_match(apr_pool_t *pool, const char *s,
+ const char *tokens[], apr_size_t len)
+{
+ char *c;
+ apr_size_t i;
+ if (s && *s) {
+ for (c = ap_get_token(pool, &s, 0); c && *c;
+ c = *s? ap_get_token(pool, &s, 0) : NULL) {
+ for (i = 0; i < len; ++i) {
+ if (!apr_strnatcasecmp(c, tokens[i])) {
+ return tokens[i];
+ }
+ }
+ while (*s++ == ';') { /* skip parameters */
+ ap_get_token(pool, &s, 0);
+ }
+ if (*s++ != ',') { /* need comma separation */
+ return 0;
+ }
+ }
+ }
+ return NULL;
+}
+
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+struct h2_ihash_t {
+ apr_hash_t *hash;
+ size_t ioff;
+};
+
+static unsigned int ihash(const char *key, apr_ssize_t *klen)
+{
+ return (unsigned int)(*((int*)key));
+}
+
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int)
+{
+ h2_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_ihash_t));
+ ih->hash = apr_hash_make_custom(pool, ihash);
+ ih->ioff = offset_of_int;
+ return ih;
+}
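+/* Usage sketch (illustrative only): entries are keyed by the int member
+ * at the given offset, e.g.
+ *
+ *   h2_ihash_t *streams = h2_ihash_create(pool, offsetof(h2_stream, id));
+ *   h2_ihash_add(streams, stream);              (keyed by stream->id)
+ *   h2_stream *s = h2_ihash_get(streams, 42);   (NULL when not present)
+ */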
+
+size_t h2_ihash_count(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash);
+}
+
+int h2_ihash_empty(h2_ihash_t *ih)
+{
+ return apr_hash_count(ih->hash) == 0;
+}
+
+void *h2_ihash_get(h2_ihash_t *ih, int id)
+{
+ return apr_hash_get(ih->hash, &id, sizeof(id));
+}
+
+typedef struct {
+ h2_ihash_iter_t *iter;
+ void *ctx;
+} iter_ctx;
+
+static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen,
+ const void *val)
+{
+ iter_ctx *ictx = ctx;
+ return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/
+}
+
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx)
+{
+ iter_ctx ictx;
+ ictx.iter = fn;
+ ictx.ctx = ctx;
+ return apr_hash_do(ihash_iter, &ictx, ih->hash);
+}
+
+void h2_ihash_add(h2_ihash_t *ih, void *val)
+{
+ apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val);
+}
+
+void h2_ihash_remove(h2_ihash_t *ih, int id)
+{
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val)
+{
+ int id = *((int*)((char *)val + ih->ioff));
+ apr_hash_set(ih->hash, &id, sizeof(id), NULL);
+}
+
+
+void h2_ihash_clear(h2_ihash_t *ih)
+{
+ apr_hash_clear(ih->hash);
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ void **buffer;
+ size_t max;
+ size_t len;
+} collect_ctx;
+
+static int collect_iter(void *x, void *val)
+{
+ collect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = val;
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max)
+{
+ collect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, collect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove_val(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+typedef struct {
+ h2_ihash_t *ih;
+ int *buffer;
+ size_t max;
+ size_t len;
+} icollect_ctx;
+
+static int icollect_iter(void *x, void *val)
+{
+ icollect_ctx *ctx = x;
+ if (ctx->len < ctx->max) {
+ ctx->buffer[ctx->len++] = *((int*)((char *)val + ctx->ih->ioff));
+ return 1;
+ }
+ return 0;
+}
+
+size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max)
+{
+ icollect_ctx ctx;
+ size_t i;
+
+ ctx.ih = ih;
+ ctx.buffer = buffer;
+ ctx.max = max;
+ ctx.len = 0;
+ h2_ihash_iter(ih, icollect_iter, &ctx);
+ for (i = 0; i < ctx.len; ++i) {
+ h2_ihash_remove(ih, buffer[i]);
+ }
+ return ctx.len;
+}
+
+/*******************************************************************************
+ * iqueue - sorted list of int
+ ******************************************************************************/
+
+static void iq_grow(h2_iqueue *q, int nlen);
+static void iq_swap(h2_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx);
+
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity)
+{
+ h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue));
+ if (q) {
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
+ }
+ return q;
+}
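+/* Usage sketch (cmp_prio/session are placeholder names): a ring buffer
+ * of stream ids, kept sorted by the comparison callback when one is given:
+ *
+ *   h2_iqueue *q = h2_iq_create(pool, 16);
+ *   h2_iq_add(q, stream_id, cmp_prio, session);  (smallest per cmp first)
+ *   next = h2_iq_shift(q);                       (returns 0 when empty)
+ */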
+
+int h2_iq_empty(h2_iqueue *q)
+{
+ return q->nelts == 0;
+}
+
+int h2_iq_count(h2_iqueue *q)
+{
+ return q->nelts;
+}
+
+
+void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
+{
+ int i;
+
+ if (q->nelts >= q->nalloc) {
+ iq_grow(q, q->nalloc * 2);
+ }
+
+ i = (q->head + q->nelts) % q->nalloc;
+ q->elts[i] = sid;
+ ++q->nelts;
+
+ if (cmp) {
+ /* bubble it to the front of the queue */
+ iq_bubble_up(q, i, q->head, cmp, ctx);
+ }
+}
+
+int h2_iq_remove(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ break;
+ }
+ }
+
+ if (i < q->nelts) {
+ ++i;
+ for (; i < q->nelts; ++i) {
+ q->elts[(q->head+i-1)%q->nalloc] = q->elts[(q->head+i)%q->nalloc];
+ }
+ --q->nelts;
+ return 1;
+ }
+ return 0;
+}
+
+void h2_iq_clear(h2_iqueue *q)
+{
+ q->nelts = 0;
+}
+
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx)
+{
+    /* Assume that changes in ordering are minimal. This needs,
+     * best case, q->nelts - 1 comparisons to check that nothing
+     * changed.
+ */
+ if (q->nelts > 0) {
+ int i, ni, prev, last;
+
+ /* Start at the end of the queue and create a tail of sorted
+ * entries. Make that tail one element longer in each iteration.
+ */
+ last = i = (q->head + q->nelts - 1) % q->nalloc;
+ while (i != q->head) {
+ prev = (q->nalloc + i - 1) % q->nalloc;
+
+ ni = iq_bubble_up(q, i, prev, cmp, ctx);
+ if (ni == prev) {
+ /* i bubbled one up, bubble the new i down, which
+ * keeps all tasks below i sorted. */
+ iq_bubble_down(q, i, last, cmp, ctx);
+ }
+ i = prev;
+ };
+ }
+}
+
+
+int h2_iq_shift(h2_iqueue *q)
+{
+ int sid;
+
+ if (q->nelts <= 0) {
+ return 0;
+ }
+
+ sid = q->elts[q->head];
+ q->head = (q->head + 1) % q->nalloc;
+ q->nelts--;
+
+ return sid;
+}
+
+static void iq_grow(h2_iqueue *q, int nlen)
+{
+ if (nlen > q->nalloc) {
+ int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
+ if (q->nelts > 0) {
+ int l = ((q->head + q->nelts) % q->nalloc) - q->head;
+
+ memmove(nq, q->elts + q->head, sizeof(int) * l);
+ if (l < q->nelts) {
+ /* elts wrapped, append elts in [0, remain] to nq */
+ int remain = q->nelts - l;
+ memmove(nq + l, q->elts, sizeof(int) * remain);
+ }
+ }
+ q->elts = nq;
+ q->nalloc = nlen;
+ q->head = 0;
+ }
+}
+
+static void iq_swap(h2_iqueue *q, int i, int j)
+{
+ int x = q->elts[i];
+ q->elts[i] = q->elts[j];
+ q->elts[j] = x;
+}
+
+static int iq_bubble_up(h2_iqueue *q, int i, int top,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int prev;
+ while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
+ && (*cmp)(q->elts[i], q->elts[prev], ctx) < 0) {
+ iq_swap(q, prev, i);
+ i = prev;
+ }
+ return i;
+}
+
+static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
+ h2_iq_cmp *cmp, void *ctx)
+{
+ int next;
+ while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
+ && (*cmp)(q->elts[i], q->elts[next], ctx) > 0) {
+ iq_swap(q, next, i);
+ i = next;
+ }
+ return i;
+}
+
+/*******************************************************************************
+ * h2_util for apr_table_t
+ ******************************************************************************/
+
+typedef struct {
+ apr_size_t bytes;
+ apr_size_t pair_extra;
+} table_bytes_ctx;
+
+static int count_bytes(void *x, const char *key, const char *value)
+{
+ table_bytes_ctx *ctx = x;
+ if (key) {
+ ctx->bytes += strlen(key);
+ }
+ if (value) {
+ ctx->bytes += strlen(value);
+ }
+ ctx->bytes += ctx->pair_extra;
+ return 1;
+}
+
+apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra)
+{
+ table_bytes_ctx ctx;
+
+ ctx.bytes = 0;
+ ctx.pair_extra = pair_extra;
+ apr_table_do(count_bytes, &ctx, t, NULL);
+ return ctx.bytes;
+}
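+/* Example: a table holding only ("Server", "Apache") with pair_extra == 4
+ * yields 6 + 6 + 4 = 16 bytes, the length of "Server: Apache\r\n". */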
+
+
+/*******************************************************************************
+ * h2_util for bucket brigades
+ ******************************************************************************/
+
+static apr_status_t last_not_included(apr_bucket_brigade *bb,
+ apr_off_t maxlen,
+ int same_alloc,
+ apr_size_t *pfile_buckets_allowed,
+ apr_bucket **pend)
+{
+ apr_bucket *b;
+ apr_status_t status = APR_SUCCESS;
+ int files_allowed = pfile_buckets_allowed? *pfile_buckets_allowed : 0;
+
+ if (maxlen >= 0) {
+ /* Find the bucket, up to which we reach maxlen/mem bytes */
+ for (b = APR_BRIGADE_FIRST(bb);
+ (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* included */
+ }
+ else {
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (maxlen == 0 && b->length > 0) {
+ *pend = b;
+ return status;
+ }
+
+ if (same_alloc && APR_BUCKET_IS_FILE(b)) {
+                    /* we always like to move file buckets as-is */
+ }
+ else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) {
+                    /* file buckets have no real memory footprint unless
+                     * they are read; disregard them in the length count as
+                     * long as we are still allowed to pass them on as files */
+ --files_allowed;
+ }
+ else if (maxlen < b->length) {
+ apr_bucket_split(b, maxlen);
+ maxlen = 0;
+ }
+ else {
+ maxlen -= b->length;
+ }
+ }
+ }
+ }
+ *pend = APR_BRIGADE_SENTINEL(bb);
+ return status;
+}
+
+apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length)
+{
+ apr_bucket *b, *next;
+ apr_off_t remain = length;
+ apr_status_t status = APR_SUCCESS;
+
+ for (b = APR_BRIGADE_FIRST(src);
+ b != APR_BRIGADE_SENTINEL(src);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* fall through */
+ }
+ else {
+ if (remain == b->length) {
+ /* fall through */
+ }
+ else if (remain <= 0) {
+ return status;
+ }
+ else {
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (remain < b->length) {
+ apr_bucket_split(b, remain);
+ }
+ }
+ }
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ remain -= b->length;
+ }
+ return status;
+}
+
+apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length)
+{
+ apr_bucket *b, *next;
+ apr_off_t remain = length;
+ apr_status_t status = APR_SUCCESS;
+
+ for (b = APR_BRIGADE_FIRST(src);
+ b != APR_BRIGADE_SENTINEL(src);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* fall through */
+ }
+ else {
+ if (remain == b->length) {
+ /* fall through */
+ }
+ else if (remain <= 0) {
+ return status;
+ }
+ else {
+ if (b->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ }
+
+ if (remain < b->length) {
+ apr_bucket_split(b, remain);
+ }
+ }
+ }
+ status = apr_bucket_copy(b, &b);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ remain -= b->length;
+ }
+ return status;
+}
+
+int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len)
+{
+ apr_bucket *b, *end;
+
+ apr_status_t status = last_not_included(bb, len, 0, 0, &end);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb) && b != end;
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (APR_BUCKET_IS_EOS(b)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos)
+{
+ apr_status_t status;
+ apr_off_t blen = 0;
+
+ /* test read to determine available length */
+ status = apr_brigade_length(bb, 1, &blen);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ else if (blen == 0) {
+        /* brigade without data, does it have an EOS bucket somewhere? */
+ *plen = 0;
+ *peos = h2_util_has_eos(bb, -1);
+ }
+ else {
+ /* data in the brigade, limit the length returned. Check for EOS
+ * bucket only if we indicate data. This is required since plen == 0
+         * means "the whole brigade" for h2_util_has_eos()
+ */
+ if (blen < *plen || *plen < 0) {
+ *plen = blen;
+ }
+ *peos = h2_util_has_eos(bb, *plen);
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
+ h2_util_pass_cb *cb, void *ctx,
+ apr_off_t *plen, int *peos)
+{
+ apr_status_t status = APR_SUCCESS;
+ int consume = (cb != NULL);
+ apr_off_t written = 0;
+ apr_off_t avail = *plen;
+ apr_bucket *next, *b;
+
+ /* Pass data in our brigade through the callback until the length
+ * is satisfied or we encounter an EOS.
+ */
+ *peos = 0;
+ for (b = APR_BRIGADE_FIRST(bb);
+ (status == APR_SUCCESS) && (b != APR_BRIGADE_SENTINEL(bb));
+ b = next) {
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ *peos = 1;
+ }
+ else {
+ /* ignore */
+ }
+ }
+ else if (avail <= 0) {
+ break;
+ }
+ else {
+ const char *data = NULL;
+ apr_size_t data_len;
+
+ if (b->length == ((apr_size_t)-1)) {
+ /* read to determine length */
+ status = apr_bucket_read(b, &data, &data_len, APR_NONBLOCK_READ);
+ }
+ else {
+ data_len = b->length;
+ }
+
+ if (data_len > avail) {
+ apr_bucket_split(b, avail);
+ data_len = avail;
+ }
+
+ if (consume) {
+ if (!data) {
+ status = apr_bucket_read(b, &data, &data_len,
+ APR_NONBLOCK_READ);
+ }
+ if (status == APR_SUCCESS) {
+ status = cb(ctx, data, data_len);
+ }
+ }
+ else {
+ data_len = b->length;
+ }
+ avail -= data_len;
+ written += data_len;
+ }
+
+ next = APR_BUCKET_NEXT(b);
+ if (consume) {
+ apr_bucket_delete(b);
+ }
+ }
+
+ *plen = written;
+ if (status == APR_SUCCESS && !*peos && !*plen) {
+ return APR_EAGAIN;
+ }
+ return status;
+}
+
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+ apr_bucket *b, const char *sep)
+{
+ apr_size_t off = 0;
+ if (sep && *sep) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s", sep);
+ }
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "eos");
+ }
+ else if (APR_BUCKET_IS_FLUSH(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "flush");
+ }
+ else if (AP_BUCKET_IS_EOR(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "eor");
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "meta(unknown)");
+ }
+ }
+ else {
+ const char *btype = "data";
+ if (APR_BUCKET_IS_FILE(b)) {
+ btype = "file";
+ }
+ else if (APR_BUCKET_IS_PIPE(b)) {
+ btype = "pipe";
+ }
+ else if (APR_BUCKET_IS_SOCKET(b)) {
+ btype = "socket";
+ }
+ else if (APR_BUCKET_IS_HEAP(b)) {
+ btype = "heap";
+ }
+ else if (APR_BUCKET_IS_TRANSIENT(b)) {
+ btype = "transient";
+ }
+ else if (APR_BUCKET_IS_IMMORTAL(b)) {
+ btype = "immortal";
+ }
+#if APR_HAS_MMAP
+ else if (APR_BUCKET_IS_MMAP(b)) {
+ btype = "mmap";
+ }
+#endif
+ else if (APR_BUCKET_IS_POOL(b)) {
+ btype = "pool";
+ }
+
+ off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]",
+ btype,
+ (long)(b->length == ((apr_size_t)-1)?
+ -1 : b->length));
+ }
+ return off;
+}
+
+apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ apr_bucket_brigade *bb)
+{
+ apr_size_t off = 0;
+ const char *sp = "";
+ apr_bucket *b;
+
+ if (bb) {
+ memset(buffer, 0, bmax--);
+ off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
+ for (b = APR_BRIGADE_FIRST(bb);
+ bmax && (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
+ sp = " ";
+ }
+ off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
+ }
+ return off;
+}
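+/* Example output of h2_util_bb_print (illustrative):
+ *   "stream_out(heap[1024] file[8192] flush eos)"
+ * followed by the given separator. */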
+
+apr_status_t h2_append_brigade(apr_bucket_brigade *to,
+ apr_bucket_brigade *from,
+ apr_off_t *plen,
+ int *peos)
+{
+ apr_bucket *e;
+ apr_off_t len = 0, remain = *plen;
+ apr_status_t rv;
+
+ *peos = 0;
+
+ while (!APR_BRIGADE_EMPTY(from)) {
+ e = APR_BRIGADE_FIRST(from);
+
+ if (APR_BUCKET_IS_METADATA(e)) {
+ if (APR_BUCKET_IS_EOS(e)) {
+ *peos = 1;
+ apr_bucket_delete(e);
+ continue;
+ }
+ }
+ else {
+ if (remain > 0 && e->length == ((apr_size_t)-1)) {
+ const char *ign;
+ apr_size_t ilen;
+ rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+
+ if (remain < e->length) {
+ if (remain <= 0) {
+ return APR_SUCCESS;
+ }
+ apr_bucket_split(e, remain);
+ }
+ }
+
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(to, e);
+ len += e->length;
+ remain -= e->length;
+ }
+
+ *plen = len;
+ return APR_SUCCESS;
+}
+
+apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb)
+{
+ apr_bucket *b;
+ apr_off_t total = 0;
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ total += sizeof(*b);
+ if (b->length > 0) {
+ if (APR_BUCKET_IS_HEAP(b)
+ || APR_BUCKET_IS_POOL(b)) {
+ total += b->length;
+ }
+ }
+ }
+ return total;
+}
+
+
+/*******************************************************************************
+ * h2_ngheader
+ ******************************************************************************/
+
+int h2_util_ignore_header(const char *name)
+{
+    /* never forward these connection-specific headers, RFC 7540 ch. 8.1.2.2 */
+ return (H2_HD_MATCH_LIT_CS("connection", name)
+ || H2_HD_MATCH_LIT_CS("proxy-connection", name)
+ || H2_HD_MATCH_LIT_CS("upgrade", name)
+ || H2_HD_MATCH_LIT_CS("keep-alive", name)
+ || H2_HD_MATCH_LIT_CS("transfer-encoding", name));
+}
+
+static int count_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ (*((size_t*)ctx))++;
+ }
+ return 1;
+}
+
+#define NV_ADD_LIT_CS(nv, k, v) add_header(nv, k, sizeof(k) - 1, v, strlen(v))
+#define NV_ADD_CS_CS(nv, k, v) add_header(nv, k, strlen(k), v, strlen(v))
+
+static int add_header(h2_ngheader *ngh,
+ const char *key, size_t key_len,
+ const char *value, size_t val_len)
+{
+ nghttp2_nv *nv = &ngh->nv[ngh->nvlen++];
+
+ nv->name = (uint8_t*)key;
+ nv->namelen = key_len;
+ nv->value = (uint8_t*)value;
+ nv->valuelen = val_len;
+ return 1;
+}
+
+static int add_table_header(void *ctx, const char *key, const char *value)
+{
+ if (!h2_util_ignore_header(key)) {
+ add_header(ctx, key, strlen(key), value, strlen(value));
+ }
+ return 1;
+}
+
+
+h2_ngheader *h2_util_ngheader_make(apr_pool_t *p, apr_table_t *header)
+{
+ h2_ngheader *ngh;
+ size_t n;
+
+ n = 0;
+ apr_table_do(count_header, &n, header, NULL);
+
+ ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ apr_table_do(add_table_header, ngh, header, NULL);
+
+ return ngh;
+}
+
+h2_ngheader *h2_util_ngheader_make_res(apr_pool_t *p,
+ int http_status,
+ apr_table_t *header)
+{
+ h2_ngheader *ngh;
+ size_t n;
+
+ n = 1;
+ apr_table_do(count_header, &n, header, NULL);
+
+ ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ NV_ADD_LIT_CS(ngh, ":status", apr_psprintf(p, "%d", http_status));
+ apr_table_do(add_table_header, ngh, header, NULL);
+
+ return ngh;
+}
+
+h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
+ const struct h2_request *req)
+{
+
+ h2_ngheader *ngh;
+ size_t n;
+
+ AP_DEBUG_ASSERT(req);
+ AP_DEBUG_ASSERT(req->scheme);
+ AP_DEBUG_ASSERT(req->authority);
+ AP_DEBUG_ASSERT(req->path);
+ AP_DEBUG_ASSERT(req->method);
+
+ n = 4;
+ apr_table_do(count_header, &n, req->headers, NULL);
+
+ ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ NV_ADD_LIT_CS(ngh, ":scheme", req->scheme);
+ NV_ADD_LIT_CS(ngh, ":authority", req->authority);
+ NV_ADD_LIT_CS(ngh, ":path", req->path);
+ NV_ADD_LIT_CS(ngh, ":method", req->method);
+ apr_table_do(add_table_header, ngh, req->headers, NULL);
+
+ return ngh;
+}
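+/* Sketch: for a request "GET https://example.org/index.html" this yields
+ * the nv list
+ *   :scheme: https, :authority: example.org, :path: /index.html, :method: GET
+ * followed by the remaining request headers, with hop-by-hop headers
+ * (connection, upgrade, ...) filtered out by h2_util_ignore_header(). */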
+
+/*******************************************************************************
+ * header HTTP/1 <-> HTTP/2 conversions
+ ******************************************************************************/
+
+
+typedef struct {
+ const char *name;
+ size_t len;
+} literal;
+
+#define H2_DEF_LITERAL(n) { (n), (sizeof(n)-1) }
+#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
+
+static literal IgnoredRequestHeaders[] = {
+ H2_DEF_LITERAL("expect"),
+ H2_DEF_LITERAL("upgrade"),
+ H2_DEF_LITERAL("connection"),
+ H2_DEF_LITERAL("keep-alive"),
+ H2_DEF_LITERAL("http2-settings"),
+ H2_DEF_LITERAL("proxy-connection"),
+ H2_DEF_LITERAL("transfer-encoding"),
+};
+static literal IgnoredRequestTrailers[] = { /* Ignore, see rfc7230, ch. 4.1.2 */
+ H2_DEF_LITERAL("te"),
+ H2_DEF_LITERAL("host"),
+ H2_DEF_LITERAL("range"),
+ H2_DEF_LITERAL("cookie"),
+ H2_DEF_LITERAL("expect"),
+ H2_DEF_LITERAL("pragma"),
+ H2_DEF_LITERAL("max-forwards"),
+ H2_DEF_LITERAL("cache-control"),
+ H2_DEF_LITERAL("authorization"),
+ H2_DEF_LITERAL("content-length"),
+ H2_DEF_LITERAL("proxy-authorization"),
+};
+static literal IgnoredResponseTrailers[] = {
+ H2_DEF_LITERAL("age"),
+ H2_DEF_LITERAL("date"),
+ H2_DEF_LITERAL("vary"),
+ H2_DEF_LITERAL("cookie"),
+ H2_DEF_LITERAL("expires"),
+ H2_DEF_LITERAL("warning"),
+ H2_DEF_LITERAL("location"),
+ H2_DEF_LITERAL("retry-after"),
+ H2_DEF_LITERAL("cache-control"),
+ H2_DEF_LITERAL("www-authenticate"),
+ H2_DEF_LITERAL("proxy-authenticate"),
+};
+static literal IgnoredProxyRespHds[] = {
+ H2_DEF_LITERAL("alt-svc"),
+};
+
+static int ignore_header(const literal *lits, size_t llen,
+ const char *name, size_t nlen)
+{
+ const literal *lit;
+ int i;
+
+ for (i = 0; i < llen; ++i) {
+ lit = &lits[i];
+ if (lit->len == nlen && !apr_strnatcasecmp(lit->name, name)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int h2_req_ignore_header(const char *name, size_t len)
+{
+ return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len);
+}
+
+int h2_req_ignore_trailer(const char *name, size_t len)
+{
+ return (h2_req_ignore_header(name, len)
+ || ignore_header(H2_LIT_ARGS(IgnoredRequestTrailers), name, len));
+}
+
+int h2_res_ignore_trailer(const char *name, size_t len)
+{
+ return ignore_header(H2_LIT_ARGS(IgnoredResponseTrailers), name, len);
+}
+
+int h2_proxy_res_ignore_header(const char *name, size_t len)
+{
+ return (h2_req_ignore_header(name, len)
+ || ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len));
+}
+
+apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ char *hname, *hvalue;
+
+ if (h2_req_ignore_header(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
+ const char *existing = apr_table_get(headers, "cookie");
+ if (existing) {
+ char *nval;
+
+            /* Cookie headers arrive separately in HTTP/2, but need
+             * to be merged by "; " (instead of the default ", ")
+ */
+ hvalue = apr_pstrndup(pool, value, vlen);
+ nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
+ apr_table_setn(headers, "Cookie", nval);
+ return APR_SUCCESS;
+ }
+ }
+ else if (H2_HD_MATCH_LIT("host", name, nlen)) {
+ if (apr_table_get(headers, "Host")) {
+ return APR_SUCCESS; /* ignore duplicate */
+ }
+ }
+
+ hname = apr_pstrndup(pool, name, nlen);
+ hvalue = apr_pstrndup(pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+ apr_table_mergen(headers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
+
+/*******************************************************************************
+ * h2 request handling
+ ******************************************************************************/
+
+h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header, int serialize)
+{
+ h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
+
+ req->id = id;
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+ req->serialize = serialize;
+
+ return req;
+}
+
+h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize)
+{
+ return h2_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
+}
+
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
+{
+ h1_ctx *x = ctx;
+ size_t klen = strlen(key);
+ if (!h2_req_ignore_header(key, klen)) {
+ h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value));
+ }
+ return 1;
+}
+
+apr_status_t h2_req_make(h2_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers)
+{
+ h1_ctx x;
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+
+ AP_DEBUG_ASSERT(req->scheme);
+ AP_DEBUG_ASSERT(req->authority);
+ AP_DEBUG_ASSERT(req->path);
+ AP_DEBUG_ASSERT(req->method);
+
+ x.pool = pool;
+ x.headers = req->headers;
+ apr_table_do(set_h1_header, &x, headers, NULL);
+ return APR_SUCCESS;
+}
+
+/*******************************************************************************
+ * frame logging
+ ******************************************************************************/
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
+{
+ char scratch[128];
+ size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
+
+ switch (frame->hd.type) {
+ case NGHTTP2_DATA: {
+ return apr_snprintf(buffer, maxlen,
+ "DATA[length=%d, flags=%d, stream=%d, padlen=%d]",
+ (int)frame->hd.length, frame->hd.flags,
+ frame->hd.stream_id, (int)frame->data.padlen);
+ }
+ case NGHTTP2_HEADERS: {
+ return apr_snprintf(buffer, maxlen,
+ "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM));
+ }
+ case NGHTTP2_PRIORITY: {
+ return apr_snprintf(buffer, maxlen,
+ "PRIORITY[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_RST_STREAM: {
+ return apr_snprintf(buffer, maxlen,
+ "RST_STREAM[length=%d, flags=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+ case NGHTTP2_SETTINGS: {
+ if (frame->hd.flags & NGHTTP2_FLAG_ACK) {
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[ack=1, stream=%d]",
+ frame->hd.stream_id);
+ }
+ return apr_snprintf(buffer, maxlen,
+ "SETTINGS[length=%d, stream=%d]",
+ (int)frame->hd.length, frame->hd.stream_id);
+ }
+ case NGHTTP2_PUSH_PROMISE: {
+ return apr_snprintf(buffer, maxlen,
+ "PUSH_PROMISE[length=%d, hend=%d, stream=%d]",
+ (int)frame->hd.length,
+ !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS),
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_PING: {
+ return apr_snprintf(buffer, maxlen,
+ "PING[length=%d, ack=%d, stream=%d]",
+ (int)frame->hd.length,
+ frame->hd.flags&NGHTTP2_FLAG_ACK,
+ frame->hd.stream_id);
+ }
+ case NGHTTP2_GOAWAY: {
+ size_t len = (frame->goaway.opaque_data_len < s_len)?
+ frame->goaway.opaque_data_len : s_len-1;
+ memcpy(scratch, frame->goaway.opaque_data, len);
+ scratch[len] = '\0';
+ return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
+ "last_stream=%d]", frame->goaway.error_code,
+ scratch, frame->goaway.last_stream_id);
+ }
+ case NGHTTP2_WINDOW_UPDATE: {
+ return apr_snprintf(buffer, maxlen,
+ "WINDOW_UPDATE[stream=%d, incr=%d]",
+ frame->hd.stream_id,
+ frame->window_update.window_size_increment);
+ }
+ default:
+ return apr_snprintf(buffer, maxlen,
+ "type=%d[length=%d, flags=%d, stream=%d]",
+ frame->hd.type, (int)frame->hd.length,
+ frame->hd.flags, frame->hd.stream_id);
+ }
+}
+
+/*******************************************************************************
+ * push policy
+ ******************************************************************************/
+void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled)
+{
+ h2_push_policy policy = H2_PUSH_NONE;
+ if (push_enabled) {
+ const char *val = apr_table_get(req->headers, "accept-push-policy");
+ if (val) {
+ if (ap_find_token(p, val, "fast-load")) {
+ policy = H2_PUSH_FAST_LOAD;
+ }
+ else if (ap_find_token(p, val, "head")) {
+ policy = H2_PUSH_HEAD;
+ }
+ else if (ap_find_token(p, val, "default")) {
+ policy = H2_PUSH_DEFAULT;
+ }
+ else if (ap_find_token(p, val, "none")) {
+ policy = H2_PUSH_NONE;
+ }
+ else {
+                /* no known value found in this header, use the default */
+ policy = H2_PUSH_DEFAULT;
+ }
+ }
+ else {
+ policy = H2_PUSH_DEFAULT;
+ }
+ }
+ req->push_policy = policy;
+}
+
diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h
new file mode 100644
index 00000000..7cae0ac0
--- /dev/null
+++ b/modules/http2/h2_util.h
@@ -0,0 +1,389 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_util__
+#define __mod_h2__h2_util__
+
+#include <nghttp2/nghttp2.h>
+
+/*******************************************************************************
+ * some debugging/format helpers
+ ******************************************************************************/
+struct h2_request;
+struct nghttp2_frame;
+
+size_t h2_util_hex_dump(char *buffer, size_t maxlen,
+ const char *data, size_t datalen);
+
+size_t h2_util_header_print(char *buffer, size_t maxlen,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen);
+
+void h2_util_camel_case_header(char *s, size_t len);
+
+int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
+
+/*******************************************************************************
+ * ihash - hash for structs with int identifier
+ ******************************************************************************/
+typedef struct h2_ihash_t h2_ihash_t;
+typedef int h2_ihash_iter_t(void *ctx, void *val);
+
+/**
+ * Create a hash for structures that have an identifying int member.
+ * @param pool the pool to use
+ * @param offset_of_int the offsetof() of the int member in the struct
+ */
+h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
+
+size_t h2_ihash_count(h2_ihash_t *ih);
+int h2_ihash_empty(h2_ihash_t *ih);
+void *h2_ihash_get(h2_ihash_t *ih, int id);
+
+/**
+ * Iterate over the hash members (without defined order) and invoke
+ * fn for each member until 0 is returned.
+ * @param ih the hash to iterate over
+ * @param fn the function to invoke on each member
+ * @param ctx user supplied data passed into each iteration call
+ * @return 0 if one iteration returned 0, otherwise != 0
+ */
+int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx);
+
+void h2_ihash_add(h2_ihash_t *ih, void *val);
+void h2_ihash_remove(h2_ihash_t *ih, int id);
+void h2_ihash_remove_val(h2_ihash_t *ih, void *val);
+void h2_ihash_clear(h2_ihash_t *ih);
+
+size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max);
+size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max);
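+
+/* Illustrative sketch only (not part of the API): typical use of the hash
+ * for structs keyed by an int member; 'item' and 'my_item' are hypothetical:
+ *
+ *   typedef struct { int id; const char *name; } item;
+ *
+ *   h2_ihash_t *ih = h2_ihash_create(pool, offsetof(item, id));
+ *   h2_ihash_add(ih, my_item);            // keyed by my_item->id
+ *   item *found = h2_ihash_get(ih, 42);   // expected NULL when id is absent
+ */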
+
+/*******************************************************************************
+ * iqueue - sorted list of int with user defined ordering
+ ******************************************************************************/
+typedef struct h2_iqueue {
+ int *elts;
+ int head;
+ int nelts;
+ int nalloc;
+ apr_pool_t *pool;
+} h2_iqueue;
+
+/**
+ * Comparator for two int to determine their order.
+ *
+ * @param i1 first int to compare
+ * @param i2 second int to compare
+ * @param ctx provided user data
+ * @return value is the same as for strcmp() and has the effect:
+ *                  == 0: i1 and i2 are treated equal in ordering
+ *                   < 0: i1 should be sorted before i2
+ *                   > 0: i2 should be sorted before i1
+ */
+typedef int h2_iq_cmp(int i1, int i2, void *ctx);
+
+/**
+ * Allocate a new queue from the pool and initialize.
+ * @param pool the memory pool
+ * @param capacity the initial capacity of the queue
+ */
+h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity);
+
+/**
+ * Return != 0 iff there are no entries in the queue.
+ * @param q the queue to check
+ */
+int h2_iq_empty(h2_iqueue *q);
+
+/**
+ * Return the number of int in the queue.
+ * @param q the queue to get size on
+ */
+int h2_iq_count(h2_iqueue *q);
+
+/**
+ * Add a stream id to the queue.
+ *
+ * @param q the queue to append the id to
+ * @param sid the stream id to add
+ * @param cmp the comparator for sorting
+ * @param ctx user data for comparator
+ */
+void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Remove the stream id from the queue. Return != 0 iff the id
+ * was found in the queue.
+ * @param q the task queue
+ * @param sid the stream id to remove
+ * @return != 0 iff the id was found in the queue
+ */
+int h2_iq_remove(h2_iqueue *q, int sid);
+
+/**
+ * Remove all entries in the queue.
+ */
+void h2_iq_clear(h2_iqueue *q);
+
+/**
+ * Sort the stream id queue again. Call if the ordering of entries
+ * has changed.
+ *
+ * @param q the queue to sort
+ * @param cmp the comparator for sorting
+ * @param ctx user data for the comparator
+ */
+void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Get the first stream id from the queue, or 0 if the queue is empty.
+ * The id will be removed from the queue.
+ *
+ * @param q the queue to get the first id from
+ * @return the first stream id of the queue, 0 if empty
+ */
+int h2_iq_shift(h2_iqueue *q);
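+
+/* Illustrative sketch only (not part of the API): using the queue with a
+ * hypothetical comparator that orders stream ids ascending:
+ *
+ *   static int by_id(int i1, int i2, void *ctx) { return i1 - i2; }
+ *
+ *   h2_iqueue *q = h2_iq_create(pool, 16);
+ *   h2_iq_add(q, 5, by_id, NULL);
+ *   h2_iq_add(q, 3, by_id, NULL);
+ *   int next = h2_iq_shift(q);   // 3, leaving 5 in the queue
+ */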
+
+/*******************************************************************************
+ * common helpers
+ ******************************************************************************/
+/* h2_log2(n) iff n is a power of 2 */
+unsigned char h2_log2(apr_uint32_t n);
+
+/**
+ * Count the bytes that all key/value pairs in a table have
+ * in length (excluding terminating 0s), plus an extra amount per pair.
+ *
+ * @param t the table to inspect
+ * @param pair_extra the extra amount to add per pair
+ * @return the number of bytes all key/value pairs have
+ */
+apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra);
+
+/**
+ * Return != 0 iff the string s contains the token, as specified in
+ * HTTP header syntax, rfc7230.
+ */
+int h2_util_contains_token(apr_pool_t *pool, const char *s, const char *token);
+
+const char *h2_util_first_token_match(apr_pool_t *pool, const char *s,
+ const char *tokens[], apr_size_t len);
+
+/** Match a header name against a string constant, case insensitive */
+#define H2_HD_MATCH_LIT(l, name, nlen) \
+ ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+/*******************************************************************************
+ * HTTP/2 header helpers
+ ******************************************************************************/
+int h2_req_ignore_header(const char *name, size_t len);
+int h2_req_ignore_trailer(const char *name, size_t len);
+int h2_res_ignore_trailer(const char *name, size_t len);
+int h2_proxy_res_ignore_header(const char *name, size_t len);
+
+/**
+ * Set the push policy for the given request. Takes request headers into
+ * account, see draft https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00
+ * for details.
+ *
+ * @param req the request to determine the policy for
+ * @param p the pool to use
+ * @param push_enabled if HTTP/2 server push is generally enabled for this request
+ */
+void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled);
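+
+/* Worked example (informative only): with pushes enabled, a request
+ * carrying the header
+ *
+ *   accept-push-policy: head
+ *
+ * ends up with req->push_policy == H2_PUSH_HEAD; an absent header or an
+ * unrecognized value falls back to H2_PUSH_DEFAULT, and with pushes
+ * disabled the policy is always H2_PUSH_NONE.
+ */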
+
+/*******************************************************************************
+ * base64 url encoding, different table from normal base64
+ ******************************************************************************/
+/**
+ * I always wanted to write my own base64url decoder...not. See
+ * https://tools.ietf.org/html/rfc4648#section-5 for description.
+ */
+apr_size_t h2_util_base64url_decode(const char **decoded,
+ const char *encoded,
+ apr_pool_t *pool);
+const char *h2_util_base64url_encode(const char *data,
+ apr_size_t len, apr_pool_t *pool);
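+
+/* Illustrative sketch only (not part of the API): a round trip through
+ * the url-safe base64 helpers:
+ *
+ *   const char *enc = h2_util_base64url_encode("Hello", 5, pool);
+ *   const char *dec;
+ *   apr_size_t len = h2_util_base64url_decode(&dec, enc, pool);
+ *   // expect len == 5 and dec to hold the original bytes
+ */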
+
+/*******************************************************************************
+ * nghttp2 helpers
+ ******************************************************************************/
+
+#define H2_HD_MATCH_LIT_CS(l, name) \
+ ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
+
+#define H2_CREATE_NV_LIT_CS(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \
+ nv->namelen = sizeof(NAME) - 1; \
+ nv->value = (uint8_t *)VALUE; \
+ nv->valuelen = strlen(VALUE)
+
+#define H2_CREATE_NV_CS_LIT(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \
+ nv->namelen = strlen(NAME); \
+ nv->value = (uint8_t *)VALUE; \
+ nv->valuelen = sizeof(VALUE) - 1
+
+#define H2_CREATE_NV_CS_CS(nv, NAME, VALUE) nv->name = (uint8_t *)NAME; \
+ nv->namelen = strlen(NAME); \
+ nv->value = (uint8_t *)VALUE; \
+ nv->valuelen = strlen(VALUE)
+
+int h2_util_ignore_header(const char *name);
+
+typedef struct h2_ngheader {
+ nghttp2_nv *nv;
+ apr_size_t nvlen;
+} h2_ngheader;
+
+h2_ngheader *h2_util_ngheader_make(apr_pool_t *p, apr_table_t *header);
+h2_ngheader *h2_util_ngheader_make_res(apr_pool_t *p,
+ int http_status,
+ apr_table_t *header);
+h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
+ const struct h2_request *req);
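+
+/* Illustrative sketch only (not part of the API): converting response
+ * headers from an apr_table_t into the nghttp2_nv array that nghttp2
+ * submit calls expect; connection-specific headers are filtered out:
+ *
+ *   h2_ngheader *ngh = h2_util_ngheader_make_res(pool, 200, headers);
+ *   // ngh->nv and ngh->nvlen can be handed to e.g. nghttp2_submit_response()
+ */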
+
+apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
+
+/*******************************************************************************
+ * h2_request helpers
+ ******************************************************************************/
+
+struct h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header,
+ int serialize);
+struct h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize);
+
+apr_status_t h2_req_make(struct h2_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers);
+
+/*******************************************************************************
+ * apr brigade helpers
+ ******************************************************************************/
+
+/**
+ * Concatenate at most length bytes from src to dest brigade, splitting
+ * buckets if necessary and reading buckets of indeterminate length.
+ */
+apr_status_t h2_brigade_concat_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length);
+
+/**
+ * Copy at most length bytes from src to dest brigade, splitting
+ * buckets if necessary and reading buckets of indeterminate length.
+ */
+apr_status_t h2_brigade_copy_length(apr_bucket_brigade *dest,
+ apr_bucket_brigade *src,
+ apr_off_t length);
+
+/**
+ * Return != 0 iff there is a FLUSH or EOS bucket in the brigade.
+ * @param bb the brigade to check on
+ * @return != 0 iff brigade holds FLUSH or EOS bucket (or both)
+ */
+int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len);
+
+/**
+ * Check how many bytes of the desired amount are available and if the
+ * end of stream is reached by that amount.
+ * @param bb the brigade to check
+ * @param plen the desired length and, on return, the available length
+ * @param peos on return, != 0 iff eos has been reached
+ */
+apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos);
+
+typedef apr_status_t h2_util_pass_cb(void *ctx,
+ const char *data, apr_off_t len);
+
+/**
+ * Read at most *plen bytes from the brigade and pass them into the
+ * given callback. If cb is NULL, just return the amount of data that
+ * could have been read.
+ * If an EOS was/would be encountered, set *peos != 0.
+ * @param bb the brigade to read from
+ * @param cb the callback to invoke for the read data
+ * @param ctx optional data passed to callback
+ * @param plen inout, as input gives the maximum number of bytes to read,
+ * on return specifies the actual/would be number of bytes
+ * @param peos != 0 iff an EOS bucket was/would be encountered.
+ */
+apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
+ h2_util_pass_cb *cb, void *ctx,
+ apr_off_t *plen, int *peos);
+
+/**
+ * Print a bucket's meta data (type and length) to the buffer.
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+ apr_bucket *b, const char *sep);
+
+/**
+ * Prints the brigade bucket types and lengths into the given buffer
+ * up to bmax.
+ * @return number of characters printed
+ */
+apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
+ const char *tag, const char *sep,
+ apr_bucket_brigade *bb);
+/**
+ * Logs the bucket brigade (which bucket types with what length)
+ * to the log at the given level.
+ * @param c the connection to log for
+ * @param sid the stream identifier this brigade belongs to
+ * @param level the log level (as in APLOG_*)
+ * @param tag a short message text about the context
+ * @param bb the brigade to log
+ */
+#define h2_util_bb_log(c, sid, level, tag, bb) \
+do { \
+ char buffer[4 * 1024]; \
+ const char *line = "(null)"; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \
+ ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld-%d): %s", \
+ (c)->id, (int)(sid), (len? buffer : line)); \
+} while(0)
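+
+/* Illustrative sketch only (not part of the API): dumping a brigade's
+ * bucket structure while debugging a stream; c, stream_id and bb are
+ * assumed to be in scope:
+ *
+ *   h2_util_bb_log(c, stream_id, APLOG_TRACE2, "before send", bb);
+ */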
+
+
+/**
+ * Transfer buckets from one brigade to another with a limit on the
+ * maximum amount of bytes transferred. Does no setaside magic; the
+ * lifetimes of the brigades must fit.
+ * @param to brigade to transfer buckets to
+ * @param from brigade to remove buckets from
+ * @param plen maximum bytes to transfer; on return, the actual bytes transferred
+ * @param peos != 0 iff an EOS bucket was transferred
+ */
+apr_status_t h2_append_brigade(apr_bucket_brigade *to,
+ apr_bucket_brigade *from,
+ apr_off_t *plen,
+ int *peos);
+
+/**
+ * Get an approximation of the memory footprint of the given
+ * brigade. This varies from apr_brigade_length as
+ * - no buckets are ever read
+ * - only buckets known to allocate memory (HEAP+POOL) are counted
+ * - the bucket struct itself is counted
+ */
+apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb);
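+
+/* Illustrative sketch only (not part of the API): moving up to 16 KB of
+ * buckets between two brigades of matching lifetime and checking how
+ * much memory stays buffered afterwards:
+ *
+ *   apr_off_t moved = 16 * 1024;
+ *   int eos = 0;
+ *   h2_append_brigade(to, from, &moved, &eos);
+ *   apr_off_t buffered = h2_brigade_mem_size(to);
+ */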
+
+#endif /* defined(__mod_h2__h2_util__) */
diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h
new file mode 100644
index 00000000..abf69c1d
--- /dev/null
+++ b/modules/http2/h2_version.h
@@ -0,0 +1,40 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef mod_h2_h2_version_h
+#define mod_h2_h2_version_h
+
+#undef PACKAGE_VERSION
+#undef PACKAGE_TARNAME
+#undef PACKAGE_STRING
+#undef PACKAGE_NAME
+#undef PACKAGE_BUGREPORT
+
+/**
+ * @macro
+ * Version number of the http2 module as c string
+ */
+#define MOD_HTTP2_VERSION "1.5.11"
+
+/**
+ * @macro
+ * Numerical representation of the version number of the http2 module
+ * release. This is a 24 bit number with 8 bits for major number, 8 bits
+ * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
+ */
+#define MOD_HTTP2_VERSION_NUM 0x01050b
+
+
+#endif /* mod_h2_h2_version_h */
diff --git a/modules/http2/h2_worker.c b/modules/http2/h2_worker.c
new file mode 100644
index 00000000..44feac14
--- /dev/null
+++ b/modules/http2/h2_worker.c
@@ -0,0 +1,103 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include <apr_thread_cond.h>
+
+#include <mpm_common.h>
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include "h2.h"
+#include "h2_private.h"
+#include "h2_conn.h"
+#include "h2_ctx.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_task.h"
+#include "h2_worker.h"
+
+static void* APR_THREAD_FUNC execute(apr_thread_t *thread, void *wctx)
+{
+ h2_worker *worker = (h2_worker *)wctx;
+ int sticky;
+
+ while (!worker->aborted) {
+ h2_task *task;
+
+ /* Get a h2_task from the main workers queue. */
+ worker->get_next(worker, worker->ctx, &task, &sticky);
+ while (task) {
+
+ h2_task_do(task, thread);
+ /* report the task done and maybe get another one from the same
+ * mplx (= master connection), if we can be sticky.
+ */
+ if (sticky && !worker->aborted) {
+ h2_mplx_task_done(task->mplx, task, &task);
+ }
+ else {
+ h2_mplx_task_done(task->mplx, task, NULL);
+ task = NULL;
+ }
+ }
+ }
+
+ worker->worker_done(worker, worker->ctx);
+ return NULL;
+}
+
+h2_worker *h2_worker_create(int id,
+ apr_pool_t *pool,
+ apr_threadattr_t *attr,
+ h2_worker_mplx_next_fn *get_next,
+ h2_worker_done_fn *worker_done,
+ void *ctx)
+{
+ h2_worker *w = apr_pcalloc(pool, sizeof(h2_worker));
+ if (w) {
+ w->id = id;
+ APR_RING_ELEM_INIT(w, link);
+ w->get_next = get_next;
+ w->worker_done = worker_done;
+ w->ctx = ctx;
+ apr_thread_create(&w->thread, attr, execute, w, pool);
+ }
+ return w;
+}
+
+apr_status_t h2_worker_destroy(h2_worker *worker)
+{
+ if (worker->thread) {
+ apr_status_t status;
+ apr_thread_join(&status, worker->thread);
+ worker->thread = NULL;
+ }
+ return APR_SUCCESS;
+}
+
+void h2_worker_abort(h2_worker *worker)
+{
+ worker->aborted = 1;
+}
+
+int h2_worker_is_aborted(h2_worker *worker)
+{
+ return worker->aborted;
+}
+
+
diff --git a/modules/http2/h2_worker.h b/modules/http2/h2_worker.h
new file mode 100644
index 00000000..04ff5703
--- /dev/null
+++ b/modules/http2/h2_worker.h
@@ -0,0 +1,135 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_worker__
+#define __mod_h2__h2_worker__
+
+struct h2_mplx;
+struct h2_request;
+struct h2_task;
+
+/* h2_worker is basically an apr_thread_t that reads from the h2_workers
+ * task queue and runs h2_tasks it is given.
+ */
+typedef struct h2_worker h2_worker;
+
+/* Invoked when the worker wants a new task to process. Will block
+ * until a h2_mplx becomes available or the worker itself
+ * gets aborted (idle timeout, for example). */
+typedef apr_status_t h2_worker_mplx_next_fn(h2_worker *worker,
+ void *ctx,
+ struct h2_task **ptask,
+ int *psticky);
+
+/* Invoked just before the worker thread exits. */
+typedef void h2_worker_done_fn(h2_worker *worker, void *ctx);
+
+
+struct h2_worker {
+ int id;
+ /** Links to the rest of the workers */
+ APR_RING_ENTRY(h2_worker) link;
+ apr_thread_t *thread;
+ h2_worker_mplx_next_fn *get_next;
+ h2_worker_done_fn *worker_done;
+ void *ctx;
+ int aborted;
+};
+
+/**
+ * The magic pointer value that indicates the head of a h2_worker list
+ * @param b The worker list
+ * @return The magic pointer value
+ */
+#define H2_WORKER_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_worker, link)
+
+/**
+ * Determine if the worker list is empty
+ * @param b The list to check
+ * @return true or false
+ */
+#define H2_WORKER_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_worker, link)
+
+/**
+ * Return the first worker in a list
+ * @param b The list to query
+ * @return The first worker in the list
+ */
+#define H2_WORKER_LIST_FIRST(b) APR_RING_FIRST(b)
+
+/**
+ * Return the last worker in a list
+ * @param b The list to query
+ * @return The last worker in the list
+ */
+#define H2_WORKER_LIST_LAST(b) APR_RING_LAST(b)
+
+/**
+ * Insert a single worker at the front of a list
+ * @param b The list to add to
+ * @param e The worker to insert
+ */
+#define H2_WORKER_LIST_INSERT_HEAD(b, e) do { \
+ h2_worker *ap__b = (e); \
+ APR_RING_INSERT_HEAD((b), ap__b, h2_worker, link); \
+ } while (0)
+
+/**
+ * Insert a single worker at the end of a list
+ * @param b The list to add to
+ * @param e The worker to insert
+ */
+#define H2_WORKER_LIST_INSERT_TAIL(b, e) do { \
+ h2_worker *ap__b = (e); \
+ APR_RING_INSERT_TAIL((b), ap__b, h2_worker, link); \
+ } while (0)
+
+/**
+ * Get the next worker in the list
+ * @param e The current worker
+ * @return The next worker
+ */
+#define H2_WORKER_NEXT(e) APR_RING_NEXT((e), link)
+/**
+ * Get the previous worker in the list
+ * @param e The current worker
+ * @return The previous worker
+ */
+#define H2_WORKER_PREV(e) APR_RING_PREV((e), link)
+
+/**
+ * Remove a worker from its list
+ * @param e The worker to remove
+ */
+#define H2_WORKER_REMOVE(e) APR_RING_REMOVE((e), link)
+
+
+/* Create a new worker with the given id, pool and thread attributes,
+ * callbacks and callback context.
+ */
+h2_worker *h2_worker_create(int id,
+ apr_pool_t *pool,
+ apr_threadattr_t *attr,
+ h2_worker_mplx_next_fn *get_next,
+ h2_worker_done_fn *worker_done,
+ void *ctx);
+
+apr_status_t h2_worker_destroy(h2_worker *worker);
+
+void h2_worker_abort(h2_worker *worker);
+
+int h2_worker_is_aborted(h2_worker *worker);
+
+#endif /* defined(__mod_h2__h2_worker__) */
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
new file mode 100644
index 00000000..2a159991
--- /dev/null
+++ b/modules/http2/h2_workers.c
@@ -0,0 +1,416 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_atomic.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+
+#include <mpm_common.h>
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+
+#include "h2.h"
+#include "h2_private.h"
+#include "h2_mplx.h"
+#include "h2_task.h"
+#include "h2_worker.h"
+#include "h2_workers.h"
+
+
+static int in_list(h2_workers *workers, h2_mplx *m)
+{
+ h2_mplx *e;
+ for (e = H2_MPLX_LIST_FIRST(&workers->mplxs);
+ e != H2_MPLX_LIST_SENTINEL(&workers->mplxs);
+ e = H2_MPLX_NEXT(e)) {
+ if (e == m) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void cleanup_zombies(h2_workers *workers, int lock)
+{
+ if (lock) {
+ apr_thread_mutex_lock(workers->lock);
+ }
+ while (!H2_WORKER_LIST_EMPTY(&workers->zombies)) {
+ h2_worker *zombie = H2_WORKER_LIST_FIRST(&workers->zombies);
+ H2_WORKER_REMOVE(zombie);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_workers: cleanup zombie %d", zombie->id);
+ h2_worker_destroy(zombie);
+ }
+ if (lock) {
+ apr_thread_mutex_unlock(workers->lock);
+ }
+}
+
+static h2_task *next_task(h2_workers *workers)
+{
+ h2_task *task = NULL;
+ h2_mplx *last = NULL;
+ int has_more;
+
+ /* Get the next h2_mplx to process that has a task to hand out.
+     * If it does, place it at the end of the queue and return the
+ * task to the worker.
+ * If it (currently) has no tasks, remove it so that it needs
+ * to register again for scheduling.
+ * If we run out of h2_mplx in the queue, we need to wait for
+ * new mplx to arrive. Depending on how many workers do exist,
+ * we do a timed wait or block indefinitely.
+ */
+ while (!task && !H2_MPLX_LIST_EMPTY(&workers->mplxs)) {
+ h2_mplx *m = H2_MPLX_LIST_FIRST(&workers->mplxs);
+
+ if (last == m) {
+ break;
+ }
+ H2_MPLX_REMOVE(m);
+ --workers->mplx_count;
+
+ task = h2_mplx_pop_task(m, &has_more);
+ if (has_more) {
+ H2_MPLX_LIST_INSERT_TAIL(&workers->mplxs, m);
+ ++workers->mplx_count;
+ if (!last) {
+ last = m;
+ }
+ }
+ }
+ return task;
+}
+
+/**
+ * Get the next task for the given worker. Will block until a task arrives
+ * or the max_wait timer expires and more than min workers exist.
+ */
+static apr_status_t get_mplx_next(h2_worker *worker, void *ctx,
+ h2_task **ptask, int *psticky)
+{
+ apr_status_t status;
+ apr_time_t wait_until = 0, now;
+ h2_workers *workers = ctx;
+ h2_task *task = NULL;
+
+ *ptask = NULL;
+ *psticky = 0;
+
+ status = apr_thread_mutex_lock(workers->lock);
+ if (status == APR_SUCCESS) {
+ ++workers->idle_workers;
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_worker(%d): looking for work", worker->id);
+
+ while (!h2_worker_is_aborted(worker) && !workers->aborted
+ && !(task = next_task(workers))) {
+
+            /* Need to wait for new tasks to arrive. If we are above
+ * minimum workers, we do a timed wait. When timeout occurs
+ * and we have still more workers, we shut down one after
+ * the other. */
+ cleanup_zombies(workers, 0);
+ if (workers->worker_count > workers->min_workers) {
+ now = apr_time_now();
+ if (now >= wait_until) {
+ wait_until = now + apr_time_from_sec(workers->max_idle_secs);
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_worker(%d): waiting signal, "
+ "workers=%d, idle=%d", worker->id,
+ (int)workers->worker_count,
+ workers->idle_workers);
+ status = apr_thread_cond_timedwait(workers->mplx_added,
+ workers->lock,
+ wait_until - now);
+ if (status == APR_TIMEUP
+ && workers->worker_count > workers->min_workers) {
+ /* waited long enough without getting a task and
+ * we are above min workers, abort this one. */
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0,
+ workers->s,
+ "h2_workers: aborting idle worker");
+ h2_worker_abort(worker);
+ break;
+ }
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_worker(%d): waiting signal (eternal), "
+ "worker_count=%d, idle=%d", worker->id,
+ (int)workers->worker_count,
+ workers->idle_workers);
+ apr_thread_cond_wait(workers->mplx_added, workers->lock);
+ }
+ }
+
+ /* Here, we either have gotten task or decided to shut down
+ * the calling worker.
+ */
+ if (task) {
+ /* Ok, we got something to give back to the worker for execution.
+ * If we have more idle workers than h2_mplx in our queue, then
+ * we let the worker be sticky, e.g. making it poll the task's
+ * h2_mplx instance for more work before asking back here.
+ * This avoids entering our global lock as long as enough idle
+ * workers remain. Stickiness of a worker ends when the connection
+ * has no new tasks to process, so the worker will get back here
+ * eventually.
+ */
+ *ptask = task;
+ *psticky = (workers->max_workers >= workers->mplx_count);
+
+ if (workers->mplx_count && workers->idle_workers > 1) {
+ apr_thread_cond_signal(workers->mplx_added);
+ }
+ }
+
+ --workers->idle_workers;
+ apr_thread_mutex_unlock(workers->lock);
+ }
+
+ return *ptask? APR_SUCCESS : APR_EOF;
+}
+
+static void worker_done(h2_worker *worker, void *ctx)
+{
+ h2_workers *workers = ctx;
+ apr_status_t status = apr_thread_mutex_lock(workers->lock);
+ if (status == APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_worker(%d): done", worker->id);
+ H2_WORKER_REMOVE(worker);
+ --workers->worker_count;
+ H2_WORKER_LIST_INSERT_TAIL(&workers->zombies, worker);
+
+ apr_thread_mutex_unlock(workers->lock);
+ }
+}
+
+static apr_status_t add_worker(h2_workers *workers)
+{
+ h2_worker *w = h2_worker_create(workers->next_worker_id++,
+ workers->pool, workers->thread_attr,
+ get_mplx_next, worker_done, workers);
+ if (!w) {
+ return APR_ENOMEM;
+ }
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_workers: adding worker(%d)", w->id);
+ ++workers->worker_count;
+ H2_WORKER_LIST_INSERT_TAIL(&workers->workers, w);
+ return APR_SUCCESS;
+}
+
+static apr_status_t h2_workers_start(h2_workers *workers)
+{
+ apr_status_t status = apr_thread_mutex_lock(workers->lock);
+ if (status == APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+ "h2_workers: starting");
+
+ while (workers->worker_count < workers->min_workers
+ && status == APR_SUCCESS) {
+ status = add_worker(workers);
+ }
+ apr_thread_mutex_unlock(workers->lock);
+ }
+ return status;
+}
+
+h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool,
+ int min_workers, int max_workers,
+ apr_size_t max_tx_handles)
+{
+ apr_status_t status;
+ h2_workers *workers;
+ apr_pool_t *pool;
+
+ AP_DEBUG_ASSERT(s);
+ AP_DEBUG_ASSERT(server_pool);
+
+ /* let's have our own pool that will be parent to all h2_worker
+ * instances we create. This happens in various threads, but always
+ * guarded by our lock. Without this pool, all subpool creations would
+ * happen on the pool handed to us, which we do not guard.
+ */
+ apr_pool_create(&pool, server_pool);
+ apr_pool_tag(pool, "h2_workers");
+ workers = apr_pcalloc(pool, sizeof(h2_workers));
+ if (workers) {
+ workers->s = s;
+ workers->pool = pool;
+ workers->min_workers = min_workers;
+ workers->max_workers = max_workers;
+ workers->max_idle_secs = 10;
+
+ workers->max_tx_handles = max_tx_handles;
+ workers->spare_tx_handles = workers->max_tx_handles;
+
+ apr_threadattr_create(&workers->thread_attr, workers->pool);
+ if (ap_thread_stacksize != 0) {
+ apr_threadattr_stacksize_set(workers->thread_attr,
+ ap_thread_stacksize);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: using stacksize=%ld",
+ (long)ap_thread_stacksize);
+ }
+
+ APR_RING_INIT(&workers->workers, h2_worker, link);
+ APR_RING_INIT(&workers->zombies, h2_worker, link);
+ APR_RING_INIT(&workers->mplxs, h2_mplx, link);
+
+ status = apr_thread_mutex_create(&workers->lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ if (status == APR_SUCCESS) {
+ status = apr_thread_cond_create(&workers->mplx_added, workers->pool);
+ }
+
+ if (status == APR_SUCCESS) {
+ status = apr_thread_mutex_create(&workers->tx_lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ }
+
+ if (status == APR_SUCCESS) {
+ status = h2_workers_start(workers);
+ }
+
+ if (status != APR_SUCCESS) {
+ h2_workers_destroy(workers);
+ workers = NULL;
+ }
+ }
+ return workers;
+}
+
+void h2_workers_destroy(h2_workers *workers)
+{
+ /* before we go, cleanup any zombie workers that may have accumulated */
+ cleanup_zombies(workers, 1);
+
+ if (workers->mplx_added) {
+ apr_thread_cond_destroy(workers->mplx_added);
+ workers->mplx_added = NULL;
+ }
+ if (workers->lock) {
+ apr_thread_mutex_destroy(workers->lock);
+ workers->lock = NULL;
+ }
+ while (!H2_MPLX_LIST_EMPTY(&workers->mplxs)) {
+ h2_mplx *m = H2_MPLX_LIST_FIRST(&workers->mplxs);
+ H2_MPLX_REMOVE(m);
+ }
+ while (!H2_WORKER_LIST_EMPTY(&workers->workers)) {
+ h2_worker *w = H2_WORKER_LIST_FIRST(&workers->workers);
+ H2_WORKER_REMOVE(w);
+ }
+ if (workers->pool) {
+ apr_pool_destroy(workers->pool);
+ /* workers is gone */
+ }
+}
+
+apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m)
+{
+ apr_status_t status = apr_thread_mutex_lock(workers->lock);
+ if (status == APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, status, workers->s,
+ "h2_workers: register mplx(%ld), idle=%d",
+ m->id, workers->idle_workers);
+ if (in_list(workers, m)) {
+ status = APR_EAGAIN;
+ }
+ else {
+ H2_MPLX_LIST_INSERT_TAIL(&workers->mplxs, m);
+ ++workers->mplx_count;
+ status = APR_SUCCESS;
+ }
+
+ if (workers->idle_workers > 0) {
+ apr_thread_cond_signal(workers->mplx_added);
+ }
+ else if (status == APR_SUCCESS
+ && workers->worker_count < workers->max_workers) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
+                         "h2_workers: got %d workers, adding 1",
+ workers->worker_count);
+ add_worker(workers);
+ }
+ apr_thread_mutex_unlock(workers->lock);
+ }
+ return status;
+}
+
+apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m)
+{
+ apr_status_t status = apr_thread_mutex_lock(workers->lock);
+ if (status == APR_SUCCESS) {
+ status = APR_EAGAIN;
+ if (in_list(workers, m)) {
+ H2_MPLX_REMOVE(m);
+ status = APR_SUCCESS;
+ }
+ apr_thread_mutex_unlock(workers->lock);
+ }
+ return status;
+}
+
+void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs)
+{
+ if (idle_secs <= 0) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, workers->s,
+ APLOGNO(02962) "h2_workers: max_worker_idle_sec value of %d"
+ " is not valid, ignored.", idle_secs);
+ return;
+ }
+ workers->max_idle_secs = idle_secs;
+}
+
+apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count)
+{
+ apr_status_t status = apr_thread_mutex_lock(workers->tx_lock);
+ if (status == APR_SUCCESS) {
+ count = H2MIN(workers->spare_tx_handles, count);
+ workers->spare_tx_handles -= count;
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
+ "h2_workers: reserved %d tx handles, %d/%d left",
+ (int)count, (int)workers->spare_tx_handles,
+ (int)workers->max_tx_handles);
+ apr_thread_mutex_unlock(workers->tx_lock);
+ return count;
+ }
+ return 0;
+}
+
+void h2_workers_tx_free(h2_workers *workers, apr_size_t count)
+{
+ apr_status_t status = apr_thread_mutex_lock(workers->tx_lock);
+ if (status == APR_SUCCESS) {
+ workers->spare_tx_handles += count;
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
+ "h2_workers: freed %d tx handles, %d/%d left",
+ (int)count, (int)workers->spare_tx_handles,
+ (int)workers->max_tx_handles);
+ apr_thread_mutex_unlock(workers->tx_lock);
+ }
+}
+
diff --git a/modules/http2/h2_workers.h b/modules/http2/h2_workers.h
new file mode 100644
index 00000000..ae7b4d89
--- /dev/null
+++ b/modules/http2/h2_workers.h
@@ -0,0 +1,120 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_workers__
+#define __mod_h2__h2_workers__
+
+/* Thread pool specific to executing h2_tasks. Has a minimum and maximum
+ * number of workers it creates. Starts with minimum workers and adds
+ * some on load, reduces the number again when idle.
+ *
+ */
+struct apr_thread_mutex_t;
+struct apr_thread_cond_t;
+struct h2_mplx;
+struct h2_request;
+struct h2_task;
+
+typedef struct h2_workers h2_workers;
+
+struct h2_workers {
+ server_rec *s;
+ apr_pool_t *pool;
+
+ int next_worker_id;
+ int min_workers;
+ int max_workers;
+ int worker_count;
+ int idle_workers;
+ int max_idle_secs;
+
+ apr_size_t max_tx_handles;
+ apr_size_t spare_tx_handles;
+
+ unsigned int aborted : 1;
+
+ apr_threadattr_t *thread_attr;
+
+ APR_RING_HEAD(h2_worker_list, h2_worker) workers;
+ APR_RING_HEAD(h2_worker_zombies, h2_worker) zombies;
+ APR_RING_HEAD(h2_mplx_list, h2_mplx) mplxs;
+ int mplx_count;
+
+ struct apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *mplx_added;
+
+ struct apr_thread_mutex_t *tx_lock;
+};
+
+
+/* Create a worker pool with the given minimum and maximum number of
+ * threads.
+ */
+h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool,
+ int min_size, int max_size,
+ apr_size_t max_tx_handles);
+
+/* Destroy the worker pool and all its threads.
+ */
+void h2_workers_destroy(h2_workers *workers);
+
+/**
+ * Registers a h2_mplx for task scheduling. If this h2_mplx runs
+ * out of tasks, it will automatically be unregistered. Should
+ * new tasks arrive, it needs to be registered again.
+ */
+apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m);
+
+/**
+ * Remove a h2_mplx from the worker registry.
+ */
+apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m);
+
+/**
+ * Set the amount of seconds a h2_worker should wait for new tasks
+ * before shutting down (if there are more than the minimum number of
+ * workers).
+ */
+void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs);
+
+/**
+ * Reservation of file handles available for transfer between workers
+ * and master connections.
+ *
+ * When handling output from request processing, file handles are often
+ * encountered when static files are served. The most efficient way is then
+ * to forward the handle itself to the master connection where it can be
+ * read or sendfile'd to the client. But file handles are a scarce resource,
+ * so there needs to be a limit on how many handles are transferred this way.
+ *
+ * h2_workers keeps track of the number of reserved handles and observes a
+ * configurable maximum value.
+ *
+ * @param workers the workers instance
+ * @param count how many handles the caller wishes to reserve
+ * @return the number of reserved handles, may be 0.
+ */
+apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count);
+
+/**
+ * Return a number of reserved file handles back to the pool. The total
+ * returned may not exceed the number reserved.
+ * @param workers the workers instance
+ * @param count how many handles are returned to the pool
+ */
+void h2_workers_tx_free(h2_workers *workers, apr_size_t count);
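+
+/* Illustrative sketch only (not part of the API): reserving file handles
+ * before passing file buckets to the master connection and returning them
+ * once the transfer is done; 'nfiles' is a hypothetical count:
+ *
+ *   apr_size_t granted = h2_workers_tx_reserve(workers, nfiles);
+ *   // forward at most 'granted' file handles, buffer the remainder
+ *   h2_workers_tx_free(workers, granted);
+ */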
+
+#endif /* defined(__mod_h2__h2_workers__) */
diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
new file mode 100644
index 00000000..480917a4
--- /dev/null
+++ b/modules/http2/mod_http2.c
@@ -0,0 +1,356 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_optional.h>
+#include <apr_optional_hooks.h>
+#include <apr_strings.h>
+#include <apr_time.h>
+#include <apr_want.h>
+
+#include <httpd.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+
+#include "mod_http2.h"
+
+#include <nghttp2/nghttp2.h>
+#include "h2_stream.h"
+#include "h2_alt_svc.h"
+#include "h2_conn.h"
+#include "h2_filter.h"
+#include "h2_task.h"
+#include "h2_session.h"
+#include "h2_config.h"
+#include "h2_ctx.h"
+#include "h2_h2.h"
+#include "h2_mplx.h"
+#include "h2_push.h"
+#include "h2_request.h"
+#include "h2_switch.h"
+#include "h2_version.h"
+
+
+static void h2_hooks(apr_pool_t *pool);
+
+AP_DECLARE_MODULE(http2) = {
+ STANDARD20_MODULE_STUFF,
+ NULL,
+ NULL,
+ h2_config_create_svr, /* func to create per server config */
+ h2_config_merge, /* func to merge per server config */
+ h2_cmds, /* command handlers */
+ h2_hooks
+};
+
+static int h2_h2_fixups(request_rec *r);
+
+typedef struct {
+ unsigned int change_prio : 1;
+ unsigned int sha256 : 1;
+} features;
+
+static features myfeats;
+
+/* The module initialization. Called once as an apache hook, before any multi
+ * processing (threaded or not) happens. It is typically called at least twice,
+ * see
+ *   http://wiki.apache.org/httpd/ModuleLife
+ * Since the first run is just a "practice" run, we want to initialize for real
+ * only on the second try. This defeats the purpose of the first dry run a bit,
+ * since apache wants to verify that a new configuration will actually work.
+ * So if we have trouble with the configuration, this will only be detected
+ * when the server has already switched.
+ * On the other hand, when we initialize lib nghttp2, all sorts of crazy things
+ * might happen and this might even eat threads. So, better to init on the real
+ * invocation, for now at least.
+ */
+static int h2_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data = NULL;
+ const char *mod_h2_init_key = "mod_http2_init_counter";
+ nghttp2_info *ngh2;
+ apr_status_t status;
+ const char *sep = "";
+
+ (void)plog;(void)ptemp;
+#ifdef H2_NG2_CHANGE_PRIO
+ myfeats.change_prio = 1;
+ sep = "+";
+#endif
+#ifdef H2_OPENSSL
+ myfeats.sha256 = 1;
+#endif
+
+ apr_pool_userdata_get(&data, mod_h2_init_key, s->process->pool);
+ if ( data == NULL ) {
+ ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03089)
+ "initializing post config dry run");
+ apr_pool_userdata_set((const void *)1, mod_h2_init_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return APR_SUCCESS;
+ }
+
+ ngh2 = nghttp2_version(0);
+ ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03090)
+ "mod_http2 (v%s, feats=%s%s%s, nghttp2 %s), initializing...",
+ MOD_HTTP2_VERSION,
+ myfeats.change_prio? "CHPRIO" : "", sep,
+ myfeats.sha256? "SHA256" : "",
+ ngh2? ngh2->version_str : "unknown");
+
+ switch (h2_conn_mpm_type()) {
+ case H2_MPM_SIMPLE:
+ case H2_MPM_MOTORZ:
+ case H2_MPM_NETWARE:
+ case H2_MPM_WINNT:
+ /* not sure we need something extra for those. */
+ break;
+ case H2_MPM_EVENT:
+ case H2_MPM_WORKER:
+ /* all fine, we know these ones */
+ break;
+ case H2_MPM_PREFORK:
+ /* ok, we now know how to handle that one */
+ break;
+ case H2_MPM_UNKNOWN:
+ /* ??? */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03091)
+ "post_config: mpm type unknown");
+ break;
+ }
+
+ status = h2_h2_init(p, s);
+ if (status == APR_SUCCESS) {
+ status = h2_switch_init(p, s);
+ }
+ if (status == APR_SUCCESS) {
+ status = h2_task_init(p, s);
+ }
+
+ return status;
+}
+
+static char *http2_var_lookup(apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *, char *name);
+static int http2_is_h2(conn_rec *);
+
+static apr_status_t http2_req_engine_push(const char *ngn_type,
+ request_rec *r,
+ http2_req_engine_init *einit)
+{
+ return h2_mplx_req_engine_push(ngn_type, r, einit);
+}
+
+static apr_status_t http2_req_engine_pull(h2_req_engine *ngn,
+ apr_read_type_e block,
+ apr_uint32_t capacity,
+ request_rec **pr)
+{
+ return h2_mplx_req_engine_pull(ngn, block, capacity, pr);
+}
+
+static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn)
+{
+ h2_mplx_req_engine_done(ngn, r_conn);
+}
+
+/* Runs once per created child process. Perform any process-related
+ * initialization here.
+ */
+static void h2_child_init(apr_pool_t *pool, server_rec *s)
+{
+ /* Set up our connection processing */
+ apr_status_t status = h2_conn_child_init(pool, s);
+ if (status != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
+ APLOGNO(02949) "initializing connection handling");
+ }
+
+}
+
+/* Install this module into the apache2 infrastructure.
+ */
+static void h2_hooks(apr_pool_t *pool)
+{
+ static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
+
+ APR_REGISTER_OPTIONAL_FN(http2_is_h2);
+ APR_REGISTER_OPTIONAL_FN(http2_var_lookup);
+ APR_REGISTER_OPTIONAL_FN(http2_req_engine_push);
+ APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull);
+ APR_REGISTER_OPTIONAL_FN(http2_req_engine_done);
+
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks");
+
+ /* Run once after configuration is set, but before mpm children initialize.
+ */
+ ap_hook_post_config(h2_post_config, mod_ssl, NULL, APR_HOOK_MIDDLE);
+
+ /* Run once after a child process has been created.
+ */
+ ap_hook_child_init(h2_child_init, NULL, NULL, APR_HOOK_MIDDLE);
+
+ h2_h2_register_hooks();
+ h2_switch_register_hooks();
+ h2_task_register_hooks();
+
+ h2_alt_svc_register_hooks();
+
+ /* Setup subprocess env for certain variables
+ */
+ ap_hook_fixups(h2_h2_fixups, NULL,NULL, APR_HOOK_MIDDLE);
+
+ /* test http2 connection status handler */
+ ap_hook_handler(h2_filter_h2_status_handler, NULL, NULL, APR_HOOK_MIDDLE);
+}
+
+static const char *val_HTTP2(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ return ctx? "on" : "off";
+}
+
+static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ if (ctx) {
+ if (r) {
+ h2_task *task = h2_ctx_get_task(ctx);
+ if (task && task->request->push_policy != H2_PUSH_NONE) {
+ return "on";
+ }
+ }
+ else if (c && h2_session_push_enabled(ctx->session)) {
+ return "on";
+ }
+ }
+ else if (s) {
+ const h2_config *cfg = h2_config_sget(s);
+ if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) {
+ return "on";
+ }
+ }
+ return "off";
+}
+
+static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ if (ctx) {
+ h2_task *task = h2_ctx_get_task(ctx);
+ if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
+ return "PUSHED";
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ if (ctx) {
+ h2_task *task = h2_ctx_get_task(ctx);
+ if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
+ return apr_itoa(p, task->request->initiated_on);
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ if (ctx) {
+ h2_task *task = h2_ctx_get_task(ctx);
+ if (task) {
+ return task->id;
+ }
+ }
+ return "";
+}
+
+static const char *val_H2_STREAM_ID(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx)
+{
+ const char *cp = val_H2_STREAM_TAG(p, s, c, r, ctx);
+ if (cp && (cp = ap_strchr_c(cp, '-'))) {
+ return ++cp;
+ }
+ return NULL;
+}
+
+typedef const char *h2_var_lookup(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, h2_ctx *ctx);
+typedef struct h2_var_def {
+ const char *name;
+ h2_var_lookup *lookup;
+ unsigned int subprocess : 1; /* should be set in r->subprocess_env */
+} h2_var_def;
+
+static h2_var_def H2_VARS[] = {
+ { "HTTP2", val_HTTP2, 1 },
+ { "H2PUSH", val_H2_PUSH, 1 },
+ { "H2_PUSH", val_H2_PUSH, 1 },
+ { "H2_PUSHED", val_H2_PUSHED, 1 },
+ { "H2_PUSHED_ON", val_H2_PUSHED_ON, 1 },
+ { "H2_STREAM_ID", val_H2_STREAM_ID, 1 },
+ { "H2_STREAM_TAG", val_H2_STREAM_TAG, 1 },
+};
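+
+/* Informative sketch only: since the variables above are exported into
+ * r->subprocess_env for requests on HTTP/2 connections, a hypothetical
+ * httpd.conf access log line could record them, e.g.:
+ *
+ *   LogFormat "%h %r %>s http2=%{HTTP2}e push=%{H2PUSH}e" h2log
+ */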
+
+#ifndef H2_ALEN
+#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
+#endif
+
+
+static int http2_is_h2(conn_rec *c)
+{
+ return h2_ctx_get(c->master? c->master : c, 0) != NULL;
+}
+
+static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
+ conn_rec *c, request_rec *r, char *name)
+{
+ int i;
+    /* If the # of vars grows, we need to put the definitions in a hash */
+ for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
+ h2_var_def *vdef = &H2_VARS[i];
+ if (!strcmp(vdef->name, name)) {
+ h2_ctx *ctx = (r? h2_ctx_rget(r) :
+ h2_ctx_get(c->master? c->master : c, 0));
+ return (char *)vdef->lookup(p, s, c, r, ctx);
+ }
+ }
+ return "";
+}
+
+static int h2_h2_fixups(request_rec *r)
+{
+ if (r->connection->master) {
+ h2_ctx *ctx = h2_ctx_rget(r);
+ int i;
+
+ for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
+ h2_var_def *vdef = &H2_VARS[i];
+ if (vdef->subprocess) {
+ apr_table_setn(r->subprocess_env, vdef->name,
+ vdef->lookup(r->pool, r->server, r->connection,
+ r, ctx));
+ }
+ }
+ }
+ return DECLINED;
+}
diff --git a/modules/http2/mod_http2.dep b/modules/http2/mod_http2.dep
new file mode 100644
index 00000000..6e1a2b1e
--- /dev/null
+++ b/modules/http2/mod_http2.dep
@@ -0,0 +1,1612 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_http2.mak
+
+./h2_alt_svc.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_util.h"\
+
+
+./h2_bucket_eoc.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eoc.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+
+
+./h2_bucket_eos.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_stream.h"\
+
+
+./h2_config.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+
+
+./h2_conn.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_version.h"\
+ ".\h2_worker.h"\
+ ".\h2_workers.h"\
+
+
+./h2_conn_io.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eoc.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_config.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_util.h"\
+
+
+./h2_ctx.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_private.h"\
+ ".\h2_session.h"\
+ ".\h2_task.h"\
+
+
+./h2_filter.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_version.h"\
+
+
+./h2_from_h1.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_time.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_private.h"\
+ ".\h2_response.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_h2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\ssl\mod_ssl.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_int_queue.c : \
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+
+
+./h2_io.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_io_set.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_io_set.h"\
+ ".\h2_private.h"\
+
+
+./h2_mplx.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_ngn_shed.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_worker.h"\
+ ".\h2_workers.h"\
+ ".\mod_http2.h"\
+
+
+./h2_ngn_shed.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_ngn_shed.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_push.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_util.h"\
+
+
+./h2_request.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_core.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_util.h"\
+
+
+./h2_response.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_time.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_util.h"\
+
+
+./h2_session.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_base64.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_bucket_eoc.h"\
+ ".\h2_bucket_eos.h"\
+ ".\h2_config.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+ ".\h2_version.h"\
+ ".\h2_workers.h"\
+
+
+./h2_stream.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_switch.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_private.h"\
+ ".\h2_switch.h"\
+
+
+./h2_task.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_core.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_atomic.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_worker.h"\
+
+
+./h2_task_input.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_task_output.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_from_h1.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_response.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_task.h"\
+ ".\h2_util.h"\
+
+
+./h2_util.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_private.h"\
+ ".\h2_request.h"\
+ ".\h2_util.h"\
+
+
+./h2_worker.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mpm_common.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_conn.h"\
+ ".\h2_ctx.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_task.h"\
+ ".\h2_worker.h"\
+
+
+./h2_workers.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mpm_common.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_atomic.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_cond.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_private.h"\
+ ".\h2_task.h"\
+ ".\h2_worker.h"\
+ ".\h2_workers.h"\
+
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+./mod_http2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_queue.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ ".\h2.h"\
+ ".\h2_alt_svc.h"\
+ ".\h2_config.h"\
+ ".\h2_conn.h"\
+ ".\h2_conn_io.h"\
+ ".\h2_ctx.h"\
+ ".\h2_filter.h"\
+ ".\h2_h2.h"\
+ ".\h2_mplx.h"\
+ ".\h2_push.h"\
+ ".\h2_request.h"\
+ ".\h2_session.h"\
+ ".\h2_stream.h"\
+ ".\h2_switch.h"\
+ ".\h2_task.h"\
+ ".\h2_version.h"\
+ ".\mod_http2.h"\
+
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
new file mode 100644
index 00000000..94941487
--- /dev/null
+++ b/modules/http2/mod_http2.dsp
@@ -0,0 +1,203 @@
+# Microsoft Developer Studio Project File - Name="mod_http2" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_http2 - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak" CFG="mod_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_http2 - Win32 Release"
+# Name "mod_http2 - Win32 Debug"
+# Begin Source File
+
+SOURCE=./h2_alt_svc.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_bucket_beam.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_bucket_eoc.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_bucket_eos.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_conn.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_conn_io.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_ctx.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_filter.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_from_h1.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_h2.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_mplx.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_ngn_shed.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_push.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_request.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_response.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_session.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_stream.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_switch.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_task.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_worker.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_workers.c
+# End Source File
+# Begin Source File
+
+SOURCE=./mod_http2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h
new file mode 100644
index 00000000..30735792
--- /dev/null
+++ b/modules/http2/mod_http2.h
@@ -0,0 +1,93 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOD_HTTP2_H__
+#define __MOD_HTTP2_H__
+
+/** The http2_var_lookup() optional function retrieves HTTP2 environment
+ * variables. */
+APR_DECLARE_OPTIONAL_FN(char *,
+ http2_var_lookup, (apr_pool_t *, server_rec *,
+ conn_rec *, request_rec *, char *));
+
+/** An optional function which returns non-zero if the given connection
+ * or its master connection is using HTTP/2. */
+APR_DECLARE_OPTIONAL_FN(int,
+ http2_is_h2, (conn_rec *));
+
+
+/*******************************************************************************
+ * HTTP/2 request engines
+ ******************************************************************************/
+
+struct apr_thread_cond_t;
+
+typedef struct h2_req_engine h2_req_engine;
+
+typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
+
+/**
+ * Initialize a h2_req_engine. The structure will be passed in but
+ * only the name and master are set. The function should initialize
+ * all fields.
+ * @param engine the allocated, partially filled structure
+ * @param r the first request to process, or NULL
+ */
+typedef apr_status_t http2_req_engine_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_uint32_t req_buffer_size,
+ request_rec *r,
+ http2_output_consumed **pconsumed,
+ void **pbaton);
+
+/**
+ * Push a request to an engine with the specified name for further processing.
+ * If no such engine is available and einit is not NULL, einit is called
+ * with a new engine record, and the caller is responsible for running the
+ * new engine instance.
+ * @param engine_type the type of the engine to add the request to
+ * @param r the request to push to an engine for processing
+ * @param einit an optional initialization callback for a new engine
+ * of the requested type, should no instance be available.
+ * By passing a non-NULL callback, the caller is willing
+ * to init and run a new engine itself.
+ * @return APR_SUCCESS iff slave was successfully added to an engine
+ */
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_req_engine_push, (const char *engine_type,
+ request_rec *r,
+ http2_req_engine_init *einit));
+
+/**
+ * Get a new request for processing in this engine.
+ * @param engine the engine asking for a new request
+ * @param block whether to wait for a new request or to return without one
+ * @param capacity the maximum number of requests the engine can take on
+ * @param pr the request to process next, or NULL
+ * @return APR_SUCCESS if a new request was assigned
+ * APR_EAGAIN if no new request is available
+ * APR_EOF if the engine may shut down, as no more requests will be scheduled
+ * APR_ECONNABORTED if the engine needs to shut down immediately
+ */
+APR_DECLARE_OPTIONAL_FN(apr_status_t,
+ http2_req_engine_pull, (h2_req_engine *engine,
+ apr_read_type_e block,
+ apr_uint32_t capacity,
+ request_rec **pr));
+APR_DECLARE_OPTIONAL_FN(void,
+ http2_req_engine_done, (h2_req_engine *engine,
+ conn_rec *rconn));
+#endif
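The optional functions declared in mod_http2.h are resolved at post_config time via APR_RETRIEVE_OPTIONAL_FN and stay NULL when mod_http2 is not loaded; mod_proxy_http2.c later in this patch does exactly that. Below is a minimal, hypothetical consumer sketch; the module hook and function names are illustrative and not part of this patch.

    #include "httpd.h"
    #include "http_config.h"
    #include "apr_optional.h"
    #include "apr_buckets.h"      /* apr_read_type_e */
    #include "mod_http2.h"

    /* Function pointers remain NULL when mod_http2 is not present. */
    static int (*is_h2)(conn_rec *c);
    static apr_status_t (*engine_pull)(h2_req_engine *engine,
                                       apr_read_type_e block,
                                       apr_uint32_t capacity,
                                       request_rec **pr);
    static void (*engine_done)(h2_req_engine *engine, conn_rec *rconn);

    static int my_post_config(apr_pool_t *p, apr_pool_t *plog,
                              apr_pool_t *ptemp, server_rec *s)
    {
        /* resolve the optional functions exported by mod_http2, if loaded */
        is_h2       = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
        engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull);
        engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done);
        return OK;
    }

    /* Hypothetical engine loop: pull requests until none are available,
     * reporting each one back as done, as mod_proxy_http2 does below. */
    static void my_engine_loop(h2_req_engine *engine, apr_uint32_t capacity)
    {
        request_rec *r = NULL;
        while (engine_pull && engine_pull(engine, APR_NONBLOCK_READ,
                                          capacity, &r) == APR_SUCCESS
               && r != NULL) {
            /* ... process r against the backend ... */
            engine_done(engine, r->connection);
            r = NULL;
        }
    }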
diff --git a/modules/http2/mod_http2.mak b/modules/http2/mod_http2.mak
new file mode 100644
index 00000000..7f8b30cc
--- /dev/null
+++ b/modules/http2/mod_http2.mak
@@ -0,0 +1,560 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_http2.dsp
+!IF "$(CFG)" == ""
+CFG=mod_http2 - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_http2 - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_http2 - Win32 Release" && "$(CFG)" != "mod_http2 - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_http2.mak" CFG="mod_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_alt_svc.obj"
+ -@erase "$(INTDIR)\h2_bucket_beam.obj"
+ -@erase "$(INTDIR)\h2_bucket_eoc.obj"
+ -@erase "$(INTDIR)\h2_bucket_eos.obj"
+ -@erase "$(INTDIR)\h2_config.obj"
+ -@erase "$(INTDIR)\h2_conn.obj"
+ -@erase "$(INTDIR)\h2_conn_io.obj"
+ -@erase "$(INTDIR)\h2_ctx.obj"
+ -@erase "$(INTDIR)\h2_filter.obj"
+ -@erase "$(INTDIR)\h2_from_h1.obj"
+ -@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_mplx.obj"
+ -@erase "$(INTDIR)\h2_ngn_shed.obj"
+ -@erase "$(INTDIR)\h2_push.obj"
+ -@erase "$(INTDIR)\h2_request.obj"
+ -@erase "$(INTDIR)\h2_response.obj"
+ -@erase "$(INTDIR)\h2_session.obj"
+ -@erase "$(INTDIR)\h2_stream.obj"
+ -@erase "$(INTDIR)\h2_switch.obj"
+ -@erase "$(INTDIR)\h2_task.obj"
+ -@erase "$(INTDIR)\h2_util.obj"
+ -@erase "$(INTDIR)\h2_worker.obj"
+ -@erase "$(INTDIR)\h2_workers.obj"
+ -@erase "$(INTDIR)\mod_http2.obj"
+ -@erase "$(INTDIR)\mod_http2.res"
+ -@erase "$(INTDIR)\mod_http2_src.idb"
+ -@erase "$(INTDIR)\mod_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_http2.exp"
+ -@erase "$(OUTDIR)\mod_http2.lib"
+ -@erase "$(OUTDIR)\mod_http2.pdb"
+ -@erase "$(OUTDIR)\mod_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_http2_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\h2_alt_svc.obj" \
+ "$(INTDIR)\h2_bucket_beam.obj" \
+ "$(INTDIR)\h2_bucket_eoc.obj" \
+ "$(INTDIR)\h2_bucket_eos.obj" \
+ "$(INTDIR)\h2_config.obj" \
+ "$(INTDIR)\h2_conn.obj" \
+ "$(INTDIR)\h2_conn_io.obj" \
+ "$(INTDIR)\h2_ctx.obj" \
+ "$(INTDIR)\h2_filter.obj" \
+ "$(INTDIR)\h2_from_h1.obj" \
+ "$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_mplx.obj" \
+ "$(INTDIR)\h2_ngn_shed.obj" \
+ "$(INTDIR)\h2_push.obj" \
+ "$(INTDIR)\h2_request.obj" \
+ "$(INTDIR)\h2_response.obj" \
+ "$(INTDIR)\h2_session.obj" \
+ "$(INTDIR)\h2_stream.obj" \
+ "$(INTDIR)\h2_switch.obj" \
+ "$(INTDIR)\h2_task.obj" \
+ "$(INTDIR)\h2_util.obj" \
+ "$(INTDIR)\h2_worker.obj" \
+ "$(INTDIR)\h2_workers.obj" \
+ "$(INTDIR)\mod_http2.obj" \
+ "$(INTDIR)\mod_http2.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib"
+
+"$(OUTDIR)\mod_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_http2.so"
+ if exist .\Release\mod_http2.so.manifest mt.exe -manifest .\Release\mod_http2.so.manifest -outputresource:.\Release\mod_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_alt_svc.obj"
+ -@erase "$(INTDIR)\h2_bucket_beam.obj"
+ -@erase "$(INTDIR)\h2_bucket_eoc.obj"
+ -@erase "$(INTDIR)\h2_bucket_eos.obj"
+ -@erase "$(INTDIR)\h2_config.obj"
+ -@erase "$(INTDIR)\h2_conn.obj"
+ -@erase "$(INTDIR)\h2_conn_io.obj"
+ -@erase "$(INTDIR)\h2_ctx.obj"
+ -@erase "$(INTDIR)\h2_filter.obj"
+ -@erase "$(INTDIR)\h2_from_h1.obj"
+ -@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_mplx.obj"
+ -@erase "$(INTDIR)\h2_ngn_shed.obj"
+ -@erase "$(INTDIR)\h2_push.obj"
+ -@erase "$(INTDIR)\h2_request.obj"
+ -@erase "$(INTDIR)\h2_response.obj"
+ -@erase "$(INTDIR)\h2_session.obj"
+ -@erase "$(INTDIR)\h2_stream.obj"
+ -@erase "$(INTDIR)\h2_switch.obj"
+ -@erase "$(INTDIR)\h2_task.obj"
+ -@erase "$(INTDIR)\h2_util.obj"
+ -@erase "$(INTDIR)\h2_worker.obj"
+ -@erase "$(INTDIR)\h2_workers.obj"
+ -@erase "$(INTDIR)\mod_http2.obj"
+ -@erase "$(INTDIR)\mod_http2.res"
+ -@erase "$(INTDIR)\mod_http2_src.idb"
+ -@erase "$(INTDIR)\mod_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_http2.exp"
+ -@erase "$(OUTDIR)\mod_http2.lib"
+ -@erase "$(OUTDIR)\mod_http2.pdb"
+ -@erase "$(OUTDIR)\mod_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_http2_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_http2.pdb" /debug /out:"$(OUTDIR)\mod_http2.so" /implib:"$(OUTDIR)\mod_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_http2.so
+LINK32_OBJS= \
+ "$(INTDIR)\h2_alt_svc.obj" \
+ "$(INTDIR)\h2_bucket_beam.obj" \
+ "$(INTDIR)\h2_bucket_eoc.obj" \
+ "$(INTDIR)\h2_bucket_eos.obj" \
+ "$(INTDIR)\h2_config.obj" \
+ "$(INTDIR)\h2_conn.obj" \
+ "$(INTDIR)\h2_conn_io.obj" \
+ "$(INTDIR)\h2_ctx.obj" \
+ "$(INTDIR)\h2_filter.obj" \
+ "$(INTDIR)\h2_from_h1.obj" \
+ "$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_mplx.obj" \
+ "$(INTDIR)\h2_ngn_shed.obj" \
+ "$(INTDIR)\h2_push.obj" \
+ "$(INTDIR)\h2_request.obj" \
+ "$(INTDIR)\h2_response.obj" \
+ "$(INTDIR)\h2_session.obj" \
+ "$(INTDIR)\h2_stream.obj" \
+ "$(INTDIR)\h2_switch.obj" \
+ "$(INTDIR)\h2_task.obj" \
+ "$(INTDIR)\h2_util.obj" \
+ "$(INTDIR)\h2_worker.obj" \
+ "$(INTDIR)\h2_workers.obj" \
+ "$(INTDIR)\mod_http2.obj" \
+ "$(INTDIR)\mod_http2.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib"
+
+"$(OUTDIR)\mod_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_http2.so"
+ if exist .\Debug\mod_http2.so.manifest mt.exe -manifest .\Debug\mod_http2.so.manifest -outputresource:.\Debug\mod_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_http2.dep")
+!INCLUDE "mod_http2.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_http2.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release" || "$(CFG)" == "mod_http2 - Win32 Debug"
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ENDIF
+
+SOURCE=./h2_alt_svc.c
+
+"$(INTDIR)\h2_alt_svc.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_bucket_beam.c
+
+"$(INTDIR)/h2_bucket_beam.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_bucket_eoc.c
+
+"$(INTDIR)\h2_bucket_eoc.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_bucket_eos.c
+
+"$(INTDIR)\h2_bucket_eos.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_config.c
+
+"$(INTDIR)\h2_config.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_conn.c
+
+"$(INTDIR)\h2_conn.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_conn_io.c
+
+"$(INTDIR)\h2_conn_io.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_ctx.c
+
+"$(INTDIR)\h2_ctx.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_filter.c
+
+"$(INTDIR)\h2_filter.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_from_h1.c
+
+"$(INTDIR)\h2_from_h1.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_h2.c
+
+"$(INTDIR)\h2_h2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_mplx.c
+
+"$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_ngn_shed.c
+
+"$(INTDIR)\h2_ngn_shed.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_push.c
+
+"$(INTDIR)\h2_push.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_request.c
+
+"$(INTDIR)\h2_request.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_response.c
+
+"$(INTDIR)\h2_response.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_session.c
+
+"$(INTDIR)\h2_session.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_stream.c
+
+"$(INTDIR)\h2_stream.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_switch.c
+
+"$(INTDIR)\h2_switch.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_task.c
+
+"$(INTDIR)\h2_task.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_util.c
+
+"$(INTDIR)\h2_util.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_worker.c
+
+"$(INTDIR)\h2_worker.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_workers.c
+
+"$(INTDIR)\h2_workers.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_http2 - Win32 Release"
+
+
+"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "NDEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_http2 - Win32 Debug"
+
+
+"$(INTDIR)\mod_http2.res" : $(SOURCE) "$(INTDIR)"
+ $(RSC) /l 0x409 /fo"$(INTDIR)\mod_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build\win32" /d "_DEBUG" /d BIN_NAME="mod_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=./mod_http2.c
+
+"$(INTDIR)\mod_http2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+
diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
new file mode 100644
index 00000000..df1d7811
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.c
@@ -0,0 +1,650 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <nghttp2/nghttp2.h>
+
+#include <httpd.h>
+#include <mod_proxy.h>
+#include "mod_http2.h"
+
+
+#include "mod_proxy_http2.h"
+#include "h2_request.h"
+#include "h2_proxy_util.h"
+#include "h2_version.h"
+#include "h2_proxy_session.h"
+
+static void register_hook(apr_pool_t *p);
+
+AP_DECLARE_MODULE(proxy_http2) = {
+ STANDARD20_MODULE_STUFF,
+ NULL, /* create per-directory config structure */
+ NULL, /* merge per-directory config structures */
+ NULL, /* create per-server config structure */
+ NULL, /* merge per-server config structures */
+ NULL, /* command apr_table_t */
+ register_hook /* register hooks */
+};
+
+/* Optional functions from mod_http2 */
+static int (*is_h2)(conn_rec *c);
+static apr_status_t (*req_engine_push)(const char *name, request_rec *r,
+ http2_req_engine_init *einit);
+static apr_status_t (*req_engine_pull)(h2_req_engine *engine,
+ apr_read_type_e block,
+ apr_uint32_t capacity,
+ request_rec **pr);
+static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn);
+
+typedef struct h2_proxy_ctx {
+ conn_rec *owner;
+ apr_pool_t *pool;
+ request_rec *rbase;
+ server_rec *server;
+ const char *proxy_func;
+ char server_portstr[32];
+ proxy_conn_rec *p_conn;
+ proxy_worker *worker;
+ proxy_server_conf *conf;
+
+ h2_req_engine *engine;
+ const char *engine_id;
+ const char *engine_type;
+ apr_pool_t *engine_pool;
+ apr_uint32_t req_buffer_size;
+ request_rec *next;
+ apr_size_t capacity;
+
+ unsigned standalone : 1;
+ unsigned is_ssl : 1;
+ unsigned flushall : 1;
+
+ apr_status_t r_status; /* status of our first request work */
+ h2_proxy_session *session; /* current http2 session against backend */
+} h2_proxy_ctx;
+
+static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog,
+ apr_pool_t *ptemp, server_rec *s)
+{
+ void *data = NULL;
+ const char *init_key = "mod_proxy_http2_init_counter";
+ nghttp2_info *ngh2;
+ apr_status_t status = APR_SUCCESS;
+ (void)plog;(void)ptemp;
+
+ apr_pool_userdata_get(&data, init_key, s->process->pool);
+ if ( data == NULL ) {
+ apr_pool_userdata_set((const void *)1, init_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return APR_SUCCESS;
+ }
+
+ ngh2 = nghttp2_version(0);
+ ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03349)
+ "mod_proxy_http2 (v%s, nghttp2 %s), initializing...",
+ MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown");
+
+ is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
+ req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push);
+ req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull);
+ req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done);
+
+ /* we need all of them */
+ if (!req_engine_push || !req_engine_pull || !req_engine_done) {
+ req_engine_push = NULL;
+ req_engine_pull = NULL;
+ req_engine_done = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * canonicalize the url into the request, if it is meant for us.
+ * slightly modified copy from mod_proxy_http
+ */
+static int proxy_http2_canon(request_rec *r, char *url)
+{
+ char *host, *path, sport[7];
+ char *search = NULL;
+ const char *err;
+ const char *scheme;
+ const char *http_scheme;
+ apr_port_t port, def_port;
+
+ /* ap_port_of_scheme() */
+ if (ap_cstr_casecmpn(url, "h2c:", 4) == 0) {
+ url += 4;
+ scheme = "h2c";
+ http_scheme = "http";
+ }
+ else if (ap_cstr_casecmpn(url, "h2:", 3) == 0) {
+ url += 3;
+ scheme = "h2";
+ http_scheme = "https";
+ }
+ else {
+ return DECLINED;
+ }
+ port = def_port = ap_proxy_port_of_scheme(http_scheme);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "HTTP2: canonicalising URL %s", url);
+
+ /* do syntactic check.
+ * We break the URL into host, port, path, search
+ */
+ err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
+ if (err) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03350)
+ "error parsing URL %s: %s", url, err);
+ return HTTP_BAD_REQUEST;
+ }
+
+ /*
+ * now parse path/search args, according to rfc1738:
+ * process the path.
+ *
+ * In a reverse proxy, our URL has been processed, so canonicalise
+ * unless proxy-nocanon is set to say it's raw.
+ * In a forward proxy, we have the original and MUST NOT MANGLE it.
+ */
+ switch (r->proxyreq) {
+ default: /* wtf are we doing here? */
+ case PROXYREQ_REVERSE:
+ if (apr_table_get(r->notes, "proxy-nocanon")) {
+ path = url; /* this is the raw path */
+ }
+ else {
+ path = ap_proxy_canonenc(r->pool, url, strlen(url),
+ enc_path, 0, r->proxyreq);
+ search = r->args;
+ }
+ break;
+ case PROXYREQ_PROXY:
+ path = url;
+ break;
+ }
+
+ if (path == NULL) {
+ return HTTP_BAD_REQUEST;
+ }
+
+ if (port != def_port) {
+ apr_snprintf(sport, sizeof(sport), ":%d", port);
+ }
+ else {
+ sport[0] = '\0';
+ }
+
+ if (ap_strchr_c(host, ':')) { /* if literal IPv6 address */
+ host = apr_pstrcat(r->pool, "[", host, "]", NULL);
+ }
+ r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "://", host, sport,
+ "/", path, (search) ? "?" : "", (search) ? search : "", NULL);
+ return OK;
+}
+
+static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes)
+{
+ h2_proxy_ctx *ctx = baton;
+
+ if (ctx->session) {
+ h2_proxy_session_update_window(ctx->session, c, bytes);
+ }
+}
+
+static apr_status_t proxy_engine_init(h2_req_engine *engine,
+ const char *id,
+ const char *type,
+ apr_pool_t *pool,
+ apr_uint32_t req_buffer_size,
+ request_rec *r,
+ http2_output_consumed **pconsumed,
+ void **pctx)
+{
+ h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
+ &proxy_http2_module);
+ if (ctx) {
+ conn_rec *c = ctx->owner;
+ h2_proxy_ctx *nctx;
+
+ /* we need another lifetime for this. If we do not host
+ * an engine, the context lives in r->pool. Since we expect
+ * to serve more than r, we need to live longer */
+ nctx = apr_pcalloc(pool, sizeof(*nctx));
+ if (nctx == NULL) {
+ return APR_ENOMEM;
+ }
+ memcpy(nctx, ctx, sizeof(*nctx));
+ ctx = nctx;
+ ctx->pool = pool;
+ ctx->engine = engine;
+ ctx->engine_id = id;
+ ctx->engine_type = type;
+ ctx->engine_pool = pool;
+ ctx->req_buffer_size = req_buffer_size;
+ ctx->capacity = 100;
+
+ ap_set_module_config(c->conn_config, &proxy_http2_module, ctx);
+
+ *pconsumed = out_consumed;
+ *pctx = ctx;
+ return APR_SUCCESS;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368)
+ "h2_proxy_session, engine init, no ctx found");
+ return APR_ENOTIMPL;
+}
+
+static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
+{
+ h2_proxy_ctx *ctx = session->user_data;
+ const char *url;
+ apr_status_t status;
+
+ url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE);
+ apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
+ ctx->p_conn->connection->local_addr->port));
+ status = h2_proxy_session_submit(session, url, r, ctx->standalone);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351)
+ "pass request body failed to %pI (%s) from %s (%s)",
+ ctx->p_conn->addr, ctx->p_conn->hostname ?
+ ctx->p_conn->hostname: "", session->c->client_ip,
+ session->c->remote_host ? session->c->remote_host: "");
+ }
+ return status;
+}
+
+static void request_done(h2_proxy_session *session, request_rec *r,
+ int complete, int touched)
+{
+ h2_proxy_ctx *ctx = session->user_data;
+ const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
+
+ if (!complete && !touched) {
+ /* untouched request, need rescheduling */
+ if (req_engine_push && is_h2 && is_h2(ctx->owner)) {
+ if (req_engine_push(ctx->engine_type, r, NULL) == APR_SUCCESS) {
+ /* push to engine */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
+ APLOGNO(03369)
+ "h2_proxy_session(%s): rescheduled request %s",
+ ctx->engine_id, task_id);
+ return;
+ }
+ }
+ }
+
+ if (r == ctx->rbase && complete) {
+ ctx->r_status = APR_SUCCESS;
+ }
+
+ if (complete) {
+ if (req_engine_done && ctx->engine) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
+ APLOGNO(03370)
+ "h2_proxy_session(%s): finished request %s",
+ ctx->engine_id, task_id);
+ req_engine_done(ctx->engine, r->connection);
+ }
+ }
+ else {
+ if (req_engine_done && ctx->engine) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
+ APLOGNO(03371)
+ "h2_proxy_session(%s): failed request %s",
+ ctx->engine_id, task_id);
+ req_engine_done(ctx->engine, r->connection);
+ }
+ }
+}
+
+static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave)
+{
+ if (ctx->next) {
+ return APR_SUCCESS;
+ }
+ else if (req_engine_pull && ctx->engine) {
+ apr_status_t status;
+ status = req_engine_pull(ctx->engine, before_leave?
+ APR_BLOCK_READ: APR_NONBLOCK_READ,
+ ctx->capacity, &ctx->next);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, ctx->owner,
+ "h2_proxy_engine(%s): pulled request (%s) %s",
+ ctx->engine_id,
+ before_leave? "before leave" : "regular",
+ (ctx->next? ctx->next->the_request : "NULL"));
+ return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status;
+ }
+ return APR_EOF;
+}
+
+static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
+ apr_status_t status = OK;
+
+ /* Step Four: Send the Request in a new HTTP/2 stream and
+ * loop until we get the response or encounter errors.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
+ "eng(%s): setup session", ctx->engine_id);
+ ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf,
+ 30, h2_log2(ctx->req_buffer_size),
+ request_done);
+ if (!ctx->session) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner,
+ APLOGNO(03372) "session unavailable");
+ return HTTP_SERVICE_UNAVAILABLE;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
+ "eng(%s): run session %s", ctx->engine_id, ctx->session->id);
+ ctx->session->user_data = ctx;
+
+ while (1) {
+ if (ctx->next) {
+ add_request(ctx->session, ctx->next);
+ ctx->next = NULL;
+ }
+
+ status = h2_proxy_session_process(ctx->session);
+
+ if (status == APR_SUCCESS) {
+ apr_status_t s2;
+ /* ongoing processing, call again */
+ if (ctx->session->remote_max_concurrent > 0
+ && ctx->session->remote_max_concurrent != ctx->capacity) {
+ ctx->capacity = ctx->session->remote_max_concurrent;
+ }
+ s2 = next_request(ctx, 0);
+ if (s2 == APR_ECONNABORTED) {
+ /* master connection gone */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner,
+ APLOGNO(03374) "eng(%s): pull request",
+ ctx->engine_id);
+ status = s2;
+ break;
+ }
+ if (!ctx->next && h2_ihash_empty(ctx->session->streams)) {
+ break;
+ }
+ }
+ else {
+ /* end of processing, maybe error */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03375) "eng(%s): end of session %s",
+ ctx->engine_id, ctx->session->id);
+ /*
+ * Any open stream of that session needs to
+ * a) be reopened on the new session iff safe to do so
+ * b) be reported as done (failed) otherwise
+ */
+ h2_proxy_session_cleanup(ctx->session, request_done);
+ break;
+ }
+ }
+
+ ctx->session->user_data = NULL;
+ ctx->session = NULL;
+
+ return status;
+}
+
+static h2_proxy_ctx *push_request_somewhere(h2_proxy_ctx *ctx)
+{
+ conn_rec *c = ctx->owner;
+ const char *engine_type, *hostname;
+
+ hostname = (ctx->p_conn->ssl_hostname?
+ ctx->p_conn->ssl_hostname : ctx->p_conn->hostname);
+ engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
+ ctx->server_portstr);
+
+ if (c->master && req_engine_push && ctx->next && is_h2 && is_h2(c)) {
+ /* If we have req_engine capabilities, push the handling of this
+ * request (e.g. slave connection) to a proxy_http2 engine which
+ * uses the same backend. We may be called to create an engine
+ * ourselves. */
+ if (req_engine_push(engine_type, ctx->next, proxy_engine_init)
+ == APR_SUCCESS) {
+ /* to renew the lifetime, we might have set a new ctx */
+ ctx = ap_get_module_config(c->conn_config, &proxy_http2_module);
+ if (ctx->engine == NULL) {
+ /* Another engine instance has taken over processing of this
+ * request. */
+ ctx->r_status = SUSPENDED;
+ ctx->next = NULL;
+ return ctx;
+ }
+ }
+ }
+
+ if (!ctx->engine) {
+ /* No engine was available or has been initialized; handle this
+ * request ourselves. */
+ ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id);
+ ctx->engine_type = engine_type;
+ ctx->engine_pool = ctx->pool;
+ ctx->req_buffer_size = (32*1024);
+ ctx->standalone = 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_proxy_http2(%ld): setup standalone engine for type %s",
+ c->id, engine_type);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "H2: hosting engine %s", ctx->engine_id);
+ }
+ return ctx;
+}
+
+static int proxy_http2_handler(request_rec *r,
+ proxy_worker *worker,
+ proxy_server_conf *conf,
+ char *url,
+ const char *proxyname,
+ apr_port_t proxyport)
+{
+ const char *proxy_func;
+ char *locurl = url, *u;
+ apr_size_t slen;
+ int is_ssl = 0;
+ apr_status_t status;
+ h2_proxy_ctx *ctx;
+ apr_uri_t uri;
+ int reconnected = 0;
+
+ /* find the scheme */
+ if ((url[0] != 'h' && url[0] != 'H') || url[1] != '2') {
+ return DECLINED;
+ }
+ u = strchr(url, ':');
+ if (u == NULL || u[1] != '/' || u[2] != '/' || u[3] == '\0') {
+ return DECLINED;
+ }
+ slen = (u - url);
+ switch(slen) {
+ case 2:
+ proxy_func = "H2";
+ is_ssl = 1;
+ break;
+ case 3:
+ if (url[2] != 'c' && url[2] != 'C') {
+ return DECLINED;
+ }
+ proxy_func = "H2C";
+ break;
+ default:
+ return DECLINED;
+ }
+ ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->owner = r->connection;
+ ctx->pool = r->pool;
+ ctx->rbase = r;
+ ctx->server = r->server;
+ ctx->proxy_func = proxy_func;
+ ctx->is_ssl = is_ssl;
+ ctx->worker = worker;
+ ctx->conf = conf;
+ ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0;
+ ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
+ ctx->next = r;
+ r = NULL;
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
+
+ /* scheme says, this is for us. */
+ apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase,
+ "H2: serving URL %s", url);
+
+run_connect:
+ /* Get a proxy_conn_rec from the worker, might be a new one, might
+ * be one still open from another request, or it might fail if the
+ * worker is stopped or in error. */
+ if ((status = ap_proxy_acquire_connection(ctx->proxy_func, &ctx->p_conn,
+ ctx->worker, ctx->server)) != OK) {
+ goto cleanup;
+ }
+
+ ctx->p_conn->is_ssl = ctx->is_ssl;
+ if (ctx->is_ssl && ctx->p_conn->connection) {
+ /* If there are some metadata on the connection (e.g. TLS alert),
+ * let mod_ssl detect them, and create a new connection below.
+ */
+ apr_bucket_brigade *tmp_bb;
+ tmp_bb = apr_brigade_create(ctx->rbase->pool,
+ ctx->rbase->connection->bucket_alloc);
+ status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb,
+ AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1);
+ if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
+ ctx->p_conn->close = 1;
+ }
+ apr_brigade_cleanup(tmp_bb);
+ }
+
+ /* Step One: Determine the URL to connect to (might be a proxy),
+ * initialize the backend accordingly and determine the server
+ * port string we can expect in responses. */
+ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker,
+ ctx->p_conn, &uri, &locurl,
+ proxyname, proxyport,
+ ctx->server_portstr,
+ sizeof(ctx->server_portstr))) != OK) {
+ goto cleanup;
+ }
+
+ /* If we are not already hosting an engine, try to push the request
+ * to an already existing engine or host a new engine here. */
+ if (!ctx->engine) {
+ ctx = push_request_somewhere(ctx);
+ if (ctx->r_status == SUSPENDED) {
+ /* request was pushed to another engine */
+ goto cleanup;
+ }
+ }
+
+ /* Step Two: Make the Connection (or check that an already existing
+ * socket is still usable). On success, we have a socket connected to
+ * backend->hostname. */
+ if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker,
+ ctx->server)) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, ctx->owner, APLOGNO(03352)
+ "H2: failed to make connection to backend: %s",
+ ctx->p_conn->hostname);
+ goto cleanup;
+ }
+
+ /* Step Three: Create conn_rec for the socket we have open now. */
+ if (!ctx->p_conn->connection) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
+ "setup new connection: is_ssl=%d %s %s %s",
+ ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
+ locurl, ctx->p_conn->hostname);
+ if ((status = ap_proxy_connection_create(ctx->proxy_func, ctx->p_conn,
+ ctx->owner,
+ ctx->server)) != OK) {
+ goto cleanup;
+ }
+
+ /*
+ * On SSL connections, set a note on the connection indicating which
+ * hostname (CN) is requested, so that mod_ssl can verify it if
+ * configured to do so.
+ */
+ if (ctx->p_conn->ssl_hostname) {
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-hostname", ctx->p_conn->ssl_hostname);
+ }
+
+ if (ctx->is_ssl) {
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-alpn-protos", "h2");
+ }
+ }
+
+run_session:
+ status = proxy_engine_run(ctx);
+ if (status == APR_SUCCESS) {
+ /* session and connection still ok */
+ if (next_request(ctx, 1) == APR_SUCCESS) {
+ /* more requests, run again */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376)
+ "run_session, again");
+ goto run_session;
+ }
+ /* done */
+ ctx->engine = NULL;
+ }
+
+cleanup:
+ if (!reconnected && ctx->engine && next_request(ctx, 1) == APR_SUCCESS) {
+ /* Still more to do, tear down old conn and start over */
+ if (ctx->p_conn) {
+ ctx->p_conn->close = 1;
+ /*only in trunk so far */
+ /*proxy_run_detach_backend(r, ctx->p_conn);*/
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+ reconnected = 1; /* we do this only once, then fail */
+ goto run_connect;
+ }
+
+ if (ctx->p_conn) {
+ if (status != APR_SUCCESS) {
+ /* close socket when errors happened or session shut down (EOF) */
+ ctx->p_conn->close = 1;
+ }
+ /*only in trunk so far */
+ /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/
+ ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+ ctx->p_conn = NULL;
+ }
+
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
+ APLOGNO(03377) "leaving handler");
+ return ctx->r_status;
+}
+
+static void register_hook(apr_pool_t *p)
+{
+ ap_hook_post_config(h2_proxy_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+
+ proxy_hook_scheme_handler(proxy_http2_handler, NULL, NULL, APR_HOOK_FIRST);
+ proxy_hook_canon_handler(proxy_http2_canon, NULL, NULL, APR_HOOK_FIRST);
+}
+
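For reference, proxy_http2_canon above rewrites r->filename into the usual "proxy:" form before the scheme handler runs. The following standalone sketch shows the resulting string for a made-up backend host, port and query string; only the format mirrors the apr_pstrcat() call at the end of the canon handler, everything else is illustrative.

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical values as proxy_http2_canon would compute them */
        const char *scheme = "h2c";                  /* cleartext HTTP/2 backend */
        const char *host   = "backend.example.org";  /* made-up hostname */
        const char *sport  = ":8080";                /* empty when port == default */
        const char *path   = "app/index.html";
        const char *search = "x=1";                  /* from r->args, may be NULL */

        char filename[256];
        snprintf(filename, sizeof(filename), "proxy:%s://%s%s/%s%s%s",
                 scheme, host, sport, path,
                 search ? "?" : "", search ? search : "");
        /* prints: r->filename = proxy:h2c://backend.example.org:8080/app/index.html?x=1 */
        printf("r->filename = %s\n", filename);
        return 0;
    }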
diff --git a/modules/http2/mod_proxy_http2.dep b/modules/http2/mod_proxy_http2.dep
new file mode 100644
index 00000000..641fca64
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.dep
@@ -0,0 +1,208 @@
+# Microsoft Developer Studio Generated Dependency File, included by mod_proxy_http2.mak
+
+./h2_proxy_session.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_mpm.h"\
+ "..\..\include\ap_provider.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\ap_slotmem.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_main.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_proxy.h"\
+ "..\..\include\mpm_common.h"\
+ "..\..\include\os.h"\
+ "..\..\include\scoreboard.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_charset.h"\
+ "..\..\include\util_ebcdic.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_mutex.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_date.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_md5.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_reslist.h"\
+ "..\..\srclib\apr-util\include\apr_strmatch.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apr_uuid.h"\
+ "..\..\srclib\apr-util\include\apr_xlate.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_fnmatch.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_session.h"\
+ ".\h2_proxy_util.h"\
+ ".\mod_http2.h"\
+
+
+./h2_proxy_util.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_util.h"\
+
+
+..\..\build\win32\httpd.rc : \
+ "..\..\include\ap_release.h"\
+
+
+./mod_proxy_http2.c : \
+ "..\..\include\ap_config.h"\
+ "..\..\include\ap_config_layout.h"\
+ "..\..\include\ap_expr.h"\
+ "..\..\include\ap_hooks.h"\
+ "..\..\include\ap_mmn.h"\
+ "..\..\include\ap_provider.h"\
+ "..\..\include\ap_regex.h"\
+ "..\..\include\ap_release.h"\
+ "..\..\include\ap_slotmem.h"\
+ "..\..\include\apache_noprobes.h"\
+ "..\..\include\http_config.h"\
+ "..\..\include\http_connection.h"\
+ "..\..\include\http_core.h"\
+ "..\..\include\http_log.h"\
+ "..\..\include\http_main.h"\
+ "..\..\include\http_protocol.h"\
+ "..\..\include\http_request.h"\
+ "..\..\include\http_vhost.h"\
+ "..\..\include\httpd.h"\
+ "..\..\include\mod_proxy.h"\
+ "..\..\include\os.h"\
+ "..\..\include\util_cfgtree.h"\
+ "..\..\include\util_charset.h"\
+ "..\..\include\util_ebcdic.h"\
+ "..\..\include\util_filter.h"\
+ "..\..\include\util_mutex.h"\
+ "..\..\srclib\apr-util\include\apr_buckets.h"\
+ "..\..\srclib\apr-util\include\apr_date.h"\
+ "..\..\srclib\apr-util\include\apr_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_md5.h"\
+ "..\..\srclib\apr-util\include\apr_optional.h"\
+ "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
+ "..\..\srclib\apr-util\include\apr_reslist.h"\
+ "..\..\srclib\apr-util\include\apr_strmatch.h"\
+ "..\..\srclib\apr-util\include\apr_uri.h"\
+ "..\..\srclib\apr-util\include\apr_uuid.h"\
+ "..\..\srclib\apr-util\include\apr_xlate.h"\
+ "..\..\srclib\apr-util\include\apu.h"\
+ "..\..\srclib\apr\include\apr.h"\
+ "..\..\srclib\apr\include\apr_allocator.h"\
+ "..\..\srclib\apr\include\apr_dso.h"\
+ "..\..\srclib\apr\include\apr_errno.h"\
+ "..\..\srclib\apr\include\apr_file_info.h"\
+ "..\..\srclib\apr\include\apr_file_io.h"\
+ "..\..\srclib\apr\include\apr_fnmatch.h"\
+ "..\..\srclib\apr\include\apr_general.h"\
+ "..\..\srclib\apr\include\apr_global_mutex.h"\
+ "..\..\srclib\apr\include\apr_hash.h"\
+ "..\..\srclib\apr\include\apr_inherit.h"\
+ "..\..\srclib\apr\include\apr_lib.h"\
+ "..\..\srclib\apr\include\apr_mmap.h"\
+ "..\..\srclib\apr\include\apr_network_io.h"\
+ "..\..\srclib\apr\include\apr_poll.h"\
+ "..\..\srclib\apr\include\apr_pools.h"\
+ "..\..\srclib\apr\include\apr_portable.h"\
+ "..\..\srclib\apr\include\apr_proc_mutex.h"\
+ "..\..\srclib\apr\include\apr_ring.h"\
+ "..\..\srclib\apr\include\apr_shm.h"\
+ "..\..\srclib\apr\include\apr_strings.h"\
+ "..\..\srclib\apr\include\apr_tables.h"\
+ "..\..\srclib\apr\include\apr_thread_mutex.h"\
+ "..\..\srclib\apr\include\apr_thread_proc.h"\
+ "..\..\srclib\apr\include\apr_time.h"\
+ "..\..\srclib\apr\include\apr_user.h"\
+ "..\..\srclib\apr\include\apr_want.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2.h"\
+ "..\..\srclib\nghttp2\lib\includes\nghttp2\nghttp2ver.h"\
+ ".\h2.h"\
+ ".\h2_proxy_session.h"\
+ ".\h2_request.h"\
+ ".\h2_proxy_util.h"\
+ ".\h2_version.h"\
+ ".\mod_http2.h"\
+ ".\mod_proxy_http2.h"\
+
diff --git a/modules/http2/mod_proxy_http2.dsp b/modules/http2/mod_proxy_http2.dsp
new file mode 100644
index 00000000..5d6305fd
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.dsp
@@ -0,0 +1,119 @@
+# Microsoft Developer Studio Project File - Name="mod_proxy_http2" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=mod_proxy_http2 - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MD /W3 /O2 /Oy- /Zi /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Release\mod_proxy_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /fo"Release/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# ADD LINK32 kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Release\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref
+# Begin Special Build Tool
+TargetPath=.\Release\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /EHsc /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "ssize_t=long" /FD /c
+# ADD CPP /nologo /MDd /W3 /EHsc /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D "ssize_t=long" /Fd"Debug\mod_proxy_http2_src" /FD /c
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /fo"Debug/mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# ADD LINK32 kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /incremental:no /debug /out:".\Debug\mod_proxy_http2.so" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+# Begin Special Build Tool
+TargetPath=.\Debug\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).manifest -outputresource:$(TargetPath);2
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "mod_proxy_http2 - Win32 Release"
+# Name "mod_proxy_http2 - Win32 Debug"
+# Begin Source File
+
+SOURCE=./h2_proxy_session.c
+# End Source File
+# Begin Source File
+
+SOURCE=./h2_proxy_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=./mod_proxy_http2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\..\build\win32\httpd.rc
+# End Source File
+# End Target
+# End Project
diff --git a/modules/http2/mod_proxy_http2.h b/modules/http2/mod_proxy_http2.h
new file mode 100644
index 00000000..7da84f0f
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.h
@@ -0,0 +1,20 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOD_PROXY_HTTP2_H__
+#define __MOD_PROXY_HTTP2_H__
+
+
+#endif
diff --git a/modules/http2/mod_proxy_http2.mak b/modules/http2/mod_proxy_http2.mak
new file mode 100644
index 00000000..e8e06241
--- /dev/null
+++ b/modules/http2/mod_proxy_http2.mak
@@ -0,0 +1,427 @@
+# Microsoft Developer Studio Generated NMAKE File, Based on mod_proxy_http2.dsp
+!IF "$(CFG)" == ""
+CFG=mod_proxy_http2 - Win32 Release
+!MESSAGE No configuration specified. Defaulting to mod_proxy_http2 - Win32 Release.
+!ENDIF
+
+!IF "$(CFG)" != "mod_proxy_http2 - Win32 Release" && "$(CFG)" != "mod_proxy_http2 - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "mod_proxy_http2.mak" CFG="mod_proxy_http2 - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "mod_proxy_http2 - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "mod_proxy_http2 - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+OUTDIR=.\Release
+INTDIR=.\Release
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "mod_proxy - Win32 Release" "mod_http2 - Win32 Release" "libhttpd - Win32 Release" "libaprutil - Win32 Release" "libapr - Win32 Release" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 ReleaseCLEAN" "libaprutil - Win32 ReleaseCLEAN" "libhttpd - Win32 ReleaseCLEAN" "mod_http2 - Win32 ReleaseCLEAN" "mod_proxy - Win32 ReleaseCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_proxy_session.obj"
+ -@erase "$(INTDIR)\h2_proxy_util.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.res"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.idb"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.exp"
+ -@erase "$(OUTDIR)\mod_proxy_http2.lib"
+ -@erase "$(OUTDIR)\mod_proxy_http2.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MD /W3 /Zi /O2 /Oy- /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "NDEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so /opt:ref
+LINK32_OBJS= \
+ "$(INTDIR)\h2_proxy_session.obj" \
+ "$(INTDIR)\h2_proxy_util.obj" \
+ "$(INTDIR)\mod_proxy_http2.obj" \
+ "$(INTDIR)\mod_proxy_http2.res" \
+ "..\..\srclib\apr\Release\libapr-1.lib" \
+ "..\..\srclib\apr-util\Release\libaprutil-1.lib" \
+ "..\..\Release\libhttpd.lib" \
+ "$(OUTDIR)\mod_http2.lib" \
+ "..\proxy\Release\mod_proxy.lib"
+
+"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Release\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Release
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so"
+ if exist .\Release\mod_proxy_http2.so.manifest mt.exe -manifest .\Release\mod_proxy_http2.so.manifest -outputresource:.\Release\mod_proxy_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+OUTDIR=.\Debug
+INTDIR=.\Debug
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+!IF "$(RECURSE)" == "0"
+
+ALL : "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ELSE
+
+ALL : "mod_proxy - Win32 Debug" "mod_http2 - Win32 Debug" "libhttpd - Win32 Debug" "libaprutil - Win32 Debug" "libapr - Win32 Debug" "$(OUTDIR)\mod_proxy_http2.so" "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+!IF "$(RECURSE)" == "1"
+CLEAN :"libapr - Win32 DebugCLEAN" "libaprutil - Win32 DebugCLEAN" "libhttpd - Win32 DebugCLEAN" "mod_http2 - Win32 DebugCLEAN" "mod_proxy - Win32 DebugCLEAN"
+!ELSE
+CLEAN :
+!ENDIF
+ -@erase "$(INTDIR)\h2_proxy_session.obj"
+ -@erase "$(INTDIR)\h2_proxy_util.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.obj"
+ -@erase "$(INTDIR)\mod_proxy_http2.res"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.idb"
+ -@erase "$(INTDIR)\mod_proxy_http2_src.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.exp"
+ -@erase "$(OUTDIR)\mod_proxy_http2.lib"
+ -@erase "$(OUTDIR)\mod_proxy_http2.pdb"
+ -@erase "$(OUTDIR)\mod_proxy_http2.so"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+CPP_PROJ=/nologo /MDd /W3 /Zi /Od /I "../ssl" /I "../../include" /I "../../srclib/apr/include" /I "../../srclib/apr-util/include" /I "../../srclib/nghttp2/lib/includes" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /D ssize_t=long /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\mod_proxy_http2_src" /FD /EHsc /c
+
+.c{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.obj::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.c{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cpp{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+.cxx{$(INTDIR)}.sbr::
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+MTL=midl.exe
+MTL_PROJ=/nologo /D "_DEBUG" /mktyplib203 /win32
+RSC=rc.exe
+RSC_PROJ=/l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache"
+BSC32=bscmake.exe
+BSC32_FLAGS=/nologo /o"$(OUTDIR)\mod_proxy_http2.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+LINK32_FLAGS=kernel32.lib nghttp2d.lib /nologo /subsystem:windows /dll /incremental:no /pdb:"$(OUTDIR)\mod_proxy_http2.pdb" /debug /out:"$(OUTDIR)\mod_proxy_http2.so" /implib:"$(OUTDIR)\mod_proxy_http2.lib" /libpath:"..\..\srclib\nghttp2\lib\MSVC_obj" /base:@..\..\os\win32\BaseAddr.ref,mod_proxy_http2.so
+LINK32_OBJS= \
+ "$(INTDIR)\h2_proxy_session.obj" \
+ "$(INTDIR)\h2_proxy_util.obj" \
+ "$(INTDIR)\mod_proxy_http2.obj" \
+ "$(INTDIR)\mod_proxy_http2.res" \
+ "..\..\srclib\apr\Debug\libapr-1.lib" \
+ "..\..\srclib\apr-util\Debug\libaprutil-1.lib" \
+ "..\..\Debug\libhttpd.lib" \
+ "$(OUTDIR)\mod_http2.lib" \
+ "..\proxy\Debug\mod_proxy.lib"
+
+"$(OUTDIR)\mod_proxy_http2.so" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+TargetPath=.\Debug\mod_proxy_http2.so
+SOURCE="$(InputPath)"
+PostBuild_Desc=Embed .manifest
+DS_POSTBUILD_DEP=$(INTDIR)\postbld.dep
+
+# Begin Custom Macros
+OutDir=.\Debug
+# End Custom Macros
+
+"$(DS_POSTBUILD_DEP)" : "$(OUTDIR)\mod_proxy_http2.so"
+ if exist .\Debug\mod_proxy_http2.so.manifest mt.exe -manifest .\Debug\mod_proxy_http2.so.manifest -outputresource:.\Debug\mod_proxy_http2.so;2
+ echo Helper for Post-build step > "$(DS_POSTBUILD_DEP)"
+
+!ENDIF
+
+
+!IF "$(NO_EXTERNAL_DEPS)" != "1"
+!IF EXISTS("mod_proxy_http2.dep")
+!INCLUDE "mod_proxy_http2.dep"
+!ELSE
+!MESSAGE Warning: cannot find "mod_proxy_http2.dep"
+!ENDIF
+!ENDIF
+
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release" || "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libapr - Win32 Release" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libapr - Win32 Debug" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libapr - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libapr.mak" CFG="libapr - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libaprutil - Win32 Release" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 ReleaseCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Release" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libaprutil - Win32 Debug" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug"
+ cd "..\..\modules\http2"
+
+"libaprutil - Win32 DebugCLEAN" :
+ cd ".\..\..\srclib\apr-util"
+ $(MAKE) /$(MAKEFLAGS) /F ".\libaprutil.mak" CFG="libaprutil - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\..\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"libhttpd - Win32 Release" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 ReleaseCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Release" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"libhttpd - Win32 Debug" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug"
+ cd ".\modules\http2"
+
+"libhttpd - Win32 DebugCLEAN" :
+ cd ".\..\.."
+ $(MAKE) /$(MAKEFLAGS) /F ".\libhttpd.mak" CFG="libhttpd - Win32 Debug" RECURSE=1 CLEAN
+ cd ".\modules\http2"
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"mod_http2 - Win32 Release" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release"
+ cd "."
+
+"mod_http2 - Win32 ReleaseCLEAN" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Release" RECURSE=1 CLEAN
+ cd "."
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"mod_http2 - Win32 Debug" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug"
+ cd "."
+
+"mod_http2 - Win32 DebugCLEAN" :
+ cd "."
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_http2.mak" CFG="mod_http2 - Win32 Debug" RECURSE=1 CLEAN
+ cd "."
+
+!ENDIF
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+"mod_proxy - Win32 Release" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release"
+ cd "..\http2"
+
+"mod_proxy - Win32 ReleaseCLEAN" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Release" RECURSE=1 CLEAN
+ cd "..\http2"
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+"mod_proxy - Win32 Debug" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug"
+ cd "..\http2"
+
+"mod_proxy - Win32 DebugCLEAN" :
+ cd ".\..\proxy"
+ $(MAKE) /$(MAKEFLAGS) /F ".\mod_proxy.mak" CFG="mod_proxy - Win32 Debug" RECURSE=1 CLEAN
+ cd "..\http2"
+
+!ENDIF
+
+SOURCE=./h2_proxy_session.c
+
+"$(INTDIR)\h2_proxy_session.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=./h2_proxy_util.c
+
+"$(INTDIR)\h2_proxy_util.obj" : $(SOURCE) "$(INTDIR)"
+
+
+SOURCE=..\..\build\win32\httpd.rc
+
+!IF "$(CFG)" == "mod_proxy_http2 - Win32 Release"
+
+
+"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)"
+	$(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build/win32" /d "NDEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "mod_proxy_http2 - Win32 Debug"
+
+
+"$(INTDIR)\mod_proxy_http2.res" : $(SOURCE) "$(INTDIR)"
+	$(RSC) /l 0x409 /fo"$(INTDIR)\mod_proxy_http2.res" /i "../../include" /i "../../srclib/apr/include" /i "../../build/win32" /d "_DEBUG" /d BIN_NAME="mod_proxy_http2.so" /d LONG_NAME="http2_module for Apache" $(SOURCE)
+
+
+!ENDIF
+
+SOURCE=./mod_proxy_http2.c
+
+"$(INTDIR)\mod_proxy_http2.obj" : $(SOURCE) "$(INTDIR)"
+
+
+
+!ENDIF
+