author    bouyer <bouyer>  2014-11-27 15:36:01 +0000
committer bouyer <bouyer>  2014-11-27 15:36:01 +0000
commit    de2a4da800850c7597a8dd4fe17b43cc37f38a1b
tree      4d1d23cd7dea659abf1c42be44993aa78ac1da82
parent    fe0281807e277b058f09f9b6c6870af23165b170
backport patches from Xen advisories:

CVE-2014-7188/XSA-108: x86/HVM: properly bound x2APIC MSR range, fixing: A buggy or malicious HVM guest can crash the host or read data relating to other guests or the hypervisor itself.

CVE-2014-8594/XSA-109: x86: don't allow page table updates on non-PV page tables in do_mmu_update(), fixing: Malicious or buggy stub domain kernels or tool stacks otherwise living outside of Domain0 can mount a denial of service attack which, if successful, can affect the whole system.

CVE-2014-8595/XSA-110: x86emul: enforce privilege level restrictions when loading CS, fixing: Malicious HVM guest user mode code may be able to elevate its privileges to guest supervisor mode, or to crash the guest.

CVE-2014-8866/XSA-111: x86: limit checks in hypercall_xlat_continuation() to actual arguments, fixing: A buggy or malicious HVM guest can crash the host.

CVE-2014-8867/XSA-112: x86/HVM: confine internally handled MMIO to solitary regions, fixing: A buggy or malicious HVM guest can crash the host.

CVE-2014-9030/XSA-113: x86/mm: fix a reference counting error in MMU_MACHPHYS_UPDATE, fixing: Malicious or buggy stub domain kernels or tool stacks otherwise living outside of Domain0 can mount a denial of service attack which, if successful, can affect the whole system.
Diffstat (limited to 'sysutils')
-rw-r--r--  sysutils/xenkernel41/Makefile                     |   4
-rw-r--r--  sysutils/xenkernel41/distinfo                     |   8
-rw-r--r--  sysutils/xenkernel41/patches/patch-CVE-2014-7188  |  38
-rw-r--r--  sysutils/xenkernel41/patches/patch-CVE-2014-8594  |  27
-rw-r--r--  sysutils/xenkernel41/patches/patch-CVE-2014-8595  | 158
-rw-r--r--  sysutils/xenkernel41/patches/patch-CVE-2014-8866  | 115
-rw-r--r--  sysutils/xenkernel41/patches/patch-CVE-2014-8867  |  90
-rw-r--r--  sysutils/xenkernel41/patches/patch-CVE-2014-9030  |  46
8 files changed, 483 insertions(+), 3 deletions(-)
diff --git a/sysutils/xenkernel41/Makefile b/sysutils/xenkernel41/Makefile
index 6e8243051bd..e85671fcb12 100644
--- a/sysutils/xenkernel41/Makefile
+++ b/sysutils/xenkernel41/Makefile
@@ -1,9 +1,9 @@
-# $NetBSD: Makefile,v 1.40 2014/10/01 17:18:22 drochner Exp $
+# $NetBSD: Makefile,v 1.41 2014/11/27 15:36:01 bouyer Exp $
VERSION= 4.1.6.1
DISTNAME= xen-${VERSION}
PKGNAME= xenkernel41-${VERSION}
-PKGREVISION= 12
+PKGREVISION= 13
CATEGORIES= sysutils
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${VERSION}/
diff --git a/sysutils/xenkernel41/distinfo b/sysutils/xenkernel41/distinfo
index 1870679ad96..3fbb85fb7e3 100644
--- a/sysutils/xenkernel41/distinfo
+++ b/sysutils/xenkernel41/distinfo
@@ -1,4 +1,4 @@
-$NetBSD: distinfo,v 1.31 2014/10/01 17:18:22 drochner Exp $
+$NetBSD: distinfo,v 1.32 2014/11/27 15:36:01 bouyer Exp $
SHA1 (xen-4.1.6.1.tar.gz) = e5f15feb0821578817a65ede16110c6eac01abd0
RMD160 (xen-4.1.6.1.tar.gz) = bff11421fc44a26f2cc3156713267abcb36d7a19
@@ -20,6 +20,12 @@ SHA1 (patch-CVE-2014-4021) = ee8ee800b35f7eaa242b06536c1ffa6568305b36
SHA1 (patch-CVE-2014-7154) = 5f0541559d911778aa5267bb5c0e1e8a9a3904e2
SHA1 (patch-CVE-2014-7155) = 0f1aa6a5d4fdb8403fc1e01b884491a63de501f8
SHA1 (patch-CVE-2014-7156) = 85043bdcf2644227d135f725cb442aade565c9d6
+SHA1 (patch-CVE-2014-7188) = b6bac1d466ba5bc276bc3aea9d4c9df37f2b9b0f
+SHA1 (patch-CVE-2014-8594) = 39d9d220d89c2356fa745dad5bf8c7ef5e8f2516
+SHA1 (patch-CVE-2014-8595) = 46bd285b7eb8f2e23984f7917b12af2191bfef80
+SHA1 (patch-CVE-2014-8866) = ee0bc3afb767b50e973d6065b84adc7e51949def
+SHA1 (patch-CVE-2014-8867) = 576433746660f62b753088a66c5315a1a2ff8f76
+SHA1 (patch-CVE-2014-9030) = f52c302585b0f4b074f7562e6b8cddacb26deee4
SHA1 (patch-Config.mk) = a43ed1b3304d6383dc093acd128a7f373d0ca266
SHA1 (patch-xen_Makefile) = d1c7e4860221f93d90818f45a77748882486f92b
SHA1 (patch-xen_arch_x86_Rules.mk) = 6b9b4bfa28924f7d3f6c793a389f1a7ac9d228e2
diff --git a/sysutils/xenkernel41/patches/patch-CVE-2014-7188 b/sysutils/xenkernel41/patches/patch-CVE-2014-7188
new file mode 100644
index 00000000000..5108be77b6b
--- /dev/null
+++ b/sysutils/xenkernel41/patches/patch-CVE-2014-7188
@@ -0,0 +1,38 @@
+$NetBSD: patch-CVE-2014-7188,v 1.1 2014/11/27 15:36:02 bouyer Exp $
+
+x86/HVM: properly bound x2APIC MSR range
+
+While the write path change appears to be purely cosmetic (but still
+gets done here for consistency), the read side mistake permitted
+accesses beyond the virtual APIC page.
+
+Note that while this isn't fully in line with the specification
+(digesting MSRs 0x800-0xBFF for the x2APIC), this is the minimal
+possible fix addressing the security issue and getting x2APIC related
+code into a consistent shape (elsewhere a 256 rather than 1024 wide
+window is being used too). This will be dealt with subsequently.
+
+This is XSA-108.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+--- xen/arch/x86/hvm/hvm.c.orig
++++ xen/arch/x86/hvm/hvm.c
+@@ -4380,7 +4380,7 @@ int hvm_msr_read_intercept(unsigned int
+ *msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
+ break;
+
+- case MSR_IA32_APICBASE_MSR ... MSR_IA32_APICBASE_MSR + 0x3ff:
++ case MSR_IA32_APICBASE_MSR ... MSR_IA32_APICBASE_MSR + 0xff:
+ if ( hvm_x2apic_msr_read(v, msr, msr_content) )
+ goto gp_fault;
+ break;
+@@ -4506,7 +4506,7 @@ int hvm_msr_write_intercept(unsigned int
+ vlapic_tdt_msr_set(vcpu_vlapic(v), msr_content);
+ break;
+
+- case MSR_IA32_APICBASE_MSR ... MSR_IA32_APICBASE_MSR + 0x3ff:
++ case MSR_IA32_APICBASE_MSR ... MSR_IA32_APICBASE_MSR + 0xff:
+ if ( hvm_x2apic_msr_write(v, msr, msr_content) )
+ goto gp_fault;
+ break;
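
As an aside on the XSA-108 fix above: each x2APIC MSR maps at (msr - 0x800) * 16 into the single 4 KiB virtual-APIC page, so a 0x3ff-wide case range lets accesses reach offset 0x3ff0, well past the page, while the 0xff-wide range stays inside it. A minimal standalone C sketch of that bound (not Xen code; MSR_X2APIC_BASE and APIC_PAGE_SIZE are assumed names for illustration):

#include <stdbool.h>
#include <stdint.h>

#define MSR_X2APIC_BASE 0x800u   /* first x2APIC MSR index */
#define APIC_PAGE_SIZE  4096u    /* one page backs all registers */

static bool x2apic_msr_in_range(uint32_t msr)
{
    /* Byte offset of this MSR's register within the virtual APIC page. */
    uint32_t off = (msr - MSR_X2APIC_BASE) * 16;

    /* 0xff * 16 = 0xff0 < 4096, so a 0xff-wide window can never index
     * beyond the page; 0x3ff * 16 = 0x3ff0 could. */
    return msr >= MSR_X2APIC_BASE &&
           msr <= MSR_X2APIC_BASE + 0xffu &&
           off < APIC_PAGE_SIZE;
}
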
diff --git a/sysutils/xenkernel41/patches/patch-CVE-2014-8594 b/sysutils/xenkernel41/patches/patch-CVE-2014-8594
new file mode 100644
index 00000000000..3d2e8b43465
--- /dev/null
+++ b/sysutils/xenkernel41/patches/patch-CVE-2014-8594
@@ -0,0 +1,27 @@
+$NetBSD: patch-CVE-2014-8594,v 1.1 2014/11/27 15:36:02 bouyer Exp $
+
+x86: don't allow page table updates on non-PV page tables in do_mmu_update()
+
+paging_write_guest_entry() and paging_cmpxchg_guest_entry() aren't
+consistently supported for non-PV guests (they'd deref NULL for PVH or
+non-HAP HVM ones). Don't allow respective MMU_* operations on the
+page tables of such domains.
+
+This is XSA-109.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/mm.c.orig 2014-11-27 15:21:15.000000000 +0100
++++ xen/arch/x86/mm.c 2014-11-27 15:26:06.000000000 +0100
+@@ -3695,6 +3695,10 @@
+ {
+ p2m_type_t p2mt;
+
++ rc = -EOPNOTSUPP;
++ if ( unlikely(paging_mode_refcounts(pt_owner)) )
++ break;
++
+ rc = xsm_mmu_normal_update(d, pg_owner, req.val);
+ if ( rc )
+ break;
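
The idea behind the XSA-109 change, as a hedged standalone sketch (assumed types and names, not the actual Xen structures): reject the operation up front when the page-table owner uses refcounted (non-PV) paging, so a paging hook that only PV domains provide is never dereferenced as NULL.

#include <errno.h>

struct domain_sketch {
    int refcounted_paging;                  /* non-PV (HVM/PVH) guest? */
    int (*write_guest_entry)(void *entry);  /* NULL for such guests */
};

static int mmu_update_sketch(struct domain_sketch *pt_owner, void *entry)
{
    /* Mirrors the patch's early "rc = -EOPNOTSUPP; break;". */
    if (pt_owner->refcounted_paging)
        return -EOPNOTSUPP;

    /* Safe: PV domains always install this hook. */
    return pt_owner->write_guest_entry(entry);
}
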
diff --git a/sysutils/xenkernel41/patches/patch-CVE-2014-8595 b/sysutils/xenkernel41/patches/patch-CVE-2014-8595
new file mode 100644
index 00000000000..66954ed447c
--- /dev/null
+++ b/sysutils/xenkernel41/patches/patch-CVE-2014-8595
@@ -0,0 +1,158 @@
+$NetBSD: patch-CVE-2014-8595,v 1.1 2014/11/27 15:36:02 bouyer Exp $
+
+x86emul: enforce privilege level restrictions when loading CS
+
+Privilege level checks were basically missing for the CS case, the
+only check that was done (RPL == DPL for nonconforming segments)
+was solely covering a single special case (return to non-conforming
+segment).
+
+Additionally in long mode the L bit set requires the D bit to be clear,
+as was recently pointed out for KVM by Nadav Amit
+<namit@cs.technion.ac.il>.
+
+Finally we also need to force the loaded selector's RPL to CPL (at
+least as long as lret/retf emulation doesn't support privilege level
+changes).
+
+This is XSA-110.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/x86_emulate/x86_emulate.c.orig
++++ xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1107,7 +1107,7 @@ realmode_load_seg(
+ static int
+ protmode_load_seg(
+ enum x86_segment seg,
+- uint16_t sel,
++ uint16_t sel, bool_t is_ret,
+ struct x86_emulate_ctxt *ctxt,
+ const struct x86_emulate_ops *ops)
+ {
+@@ -1179,9 +1179,23 @@ protmode_load_seg(
+ /* Code segment? */
+ if ( !(desc.b & (1u<<11)) )
+ goto raise_exn;
+- /* Non-conforming segment: check DPL against RPL. */
+- if ( ((desc.b & (6u<<9)) != (6u<<9)) && (dpl != rpl) )
++ if ( is_ret
++ ? /*
++ * Really rpl < cpl, but our sole caller doesn't handle
++ * privilege level changes.
++ */
++ rpl != cpl || (desc.b & (1 << 10) ? dpl > rpl : dpl != rpl)
++ : desc.b & (1 << 10)
++ /* Conforming segment: check DPL against CPL. */
++ ? dpl > cpl
++ /* Non-conforming segment: check RPL and DPL against CPL. */
++ : rpl > cpl || dpl != cpl )
+ goto raise_exn;
++ /* 64-bit code segments (L bit set) must have D bit clear. */
++ if ( in_longmode(ctxt, ops) &&
++ (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
++ goto raise_exn;
++ sel = (sel ^ rpl) | cpl;
+ break;
+ case x86_seg_ss:
+ /* Writable data segment? */
+@@ -1246,7 +1260,7 @@ protmode_load_seg(
+ static int
+ load_seg(
+ enum x86_segment seg,
+- uint16_t sel,
++ uint16_t sel, bool_t is_ret,
+ struct x86_emulate_ctxt *ctxt,
+ const struct x86_emulate_ops *ops)
+ {
+@@ -1255,7 +1269,7 @@ load_seg(
+ return X86EMUL_UNHANDLEABLE;
+
+ if ( in_protmode(ctxt, ops) )
+- return protmode_load_seg(seg, sel, ctxt, ops);
++ return protmode_load_seg(seg, sel, is_ret, ctxt, ops);
+
+ return realmode_load_seg(seg, sel, ctxt, ops);
+ }
+@@ -1852,7 +1866,7 @@ x86_emulate(
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &dst.val, op_bytes, ctxt, ops)) != 0 )
+ goto done;
+- if ( (rc = load_seg(src.val, (uint16_t)dst.val, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(src.val, dst.val, 0, ctxt, ops)) != 0 )
+ return rc;
+ break;
+
+@@ -2222,7 +2236,7 @@ x86_emulate(
+ enum x86_segment seg = decode_segment(modrm_reg);
+ generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
+ generate_exception_if(seg == x86_seg_cs, EXC_UD, -1);
+- if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(seg, src.val, 0, ctxt, ops)) != 0 )
+ goto done;
+ if ( seg == x86_seg_ss )
+ ctxt->retire.flags.mov_ss = 1;
+@@ -2303,7 +2317,7 @@ x86_emulate(
+ &_regs.eip, op_bytes, ctxt)) )
+ goto done;
+
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = eip;
+ break;
+@@ -2526,7 +2540,7 @@ x86_emulate(
+ if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
+ &sel, 2, ctxt, ops)) != 0 )
+ goto done;
+- if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(dst.val, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ dst.val = src.val;
+ break;
+@@ -2600,7 +2614,7 @@ x86_emulate(
+ &dst.val, op_bytes, ctxt, ops)) ||
+ (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
+ &src.val, op_bytes, ctxt, ops)) ||
+- (rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) )
++ (rc = load_seg(x86_seg_cs, src.val, 1, ctxt, ops)) )
+ goto done;
+ _regs.eip = dst.val;
+ break;
+@@ -2647,7 +2661,7 @@ x86_emulate(
+ _regs.eflags &= mask;
+ _regs.eflags |= (uint32_t)(eflags & ~mask) | 0x02;
+ _regs.eip = eip;
+- if ( (rc = load_seg(x86_seg_cs, (uint16_t)cs, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, cs, 1, ctxt, ops)) != 0 )
+ goto done;
+ break;
+ }
+@@ -3277,7 +3291,7 @@ x86_emulate(
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
+ eip = insn_fetch_bytes(op_bytes);
+ sel = insn_fetch_type(uint16_t);
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = eip;
+ break;
+@@ -3590,7 +3604,7 @@ x86_emulate(
+ goto done;
+ }
+
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = dst.val;
+
+@@ -3671,7 +3685,7 @@ x86_emulate(
+ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
+ if ( (rc = load_seg((modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr,
+- src.val, ctxt, ops)) != 0 )
++ src.val, 0, ctxt, ops)) != 0 )
+ goto done;
+ break;
+
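
The combined condition XSA-110 adds to protmode_load_seg() is dense; the same decision logic written out as a hedged standalone sketch (assumed descriptor layout: bit 10 of the high descriptor word = conforming, bits 13-14 = DPL; not the emulator itself):

#include <stdbool.h>
#include <stdint.h>

static bool cs_load_permitted(uint32_t desc_b, unsigned int rpl,
                              unsigned int cpl, bool is_ret)
{
    unsigned int dpl = (desc_b >> 13) & 3;
    bool conforming = (desc_b & (1u << 10)) != 0;

    if (is_ret)
        /* lret/retf emulation supports no privilege change, so RPL must
         * equal CPL; DPL may sit below RPL only for conforming targets. */
        return rpl == cpl && (conforming ? dpl <= rpl : dpl == rpl);

    return conforming
           ? dpl <= cpl                 /* conforming: DPL <= CPL */
           : rpl <= cpl && dpl == cpl;  /* non-conforming: both vs. CPL */
}
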
diff --git a/sysutils/xenkernel41/patches/patch-CVE-2014-8866 b/sysutils/xenkernel41/patches/patch-CVE-2014-8866
new file mode 100644
index 00000000000..d65688bb667
--- /dev/null
+++ b/sysutils/xenkernel41/patches/patch-CVE-2014-8866
@@ -0,0 +1,115 @@
+$NetBSD: patch-CVE-2014-8866,v 1.1 2014/11/27 15:36:02 bouyer Exp $
+
+x86: limit checks in hypercall_xlat_continuation() to actual arguments
+
+HVM/PVH guests can otherwise trigger the final BUG_ON() in that
+function by entering 64-bit mode, setting the high halves of affected
+registers to non-zero values, leaving 64-bit mode, and issuing a
+hypercall that might get preempted and hence become subject to
+continuation argument translation (HYPERVISOR_memory_op being the only
+one possible for HVM, PVH also having the option of using
+HYPERVISOR_mmuext_op). This issue got introduced when HVM code was
+switched to use compat_memory_op() - neither that nor
+hypercall_xlat_continuation() were originally intended to be used by
+other than PV guests (which can't enter 64-bit mode and hence have no
+way to alter the high halves of 64-bit registers).
+
+This is XSA-111.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/domain.c.orig
++++ xen/arch/x86/domain.c
+@@ -1921,7 +1921,8 @@ unsigned long hypercall_create_continuat
+ }
+
+ #ifdef CONFIG_COMPAT
+-int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...)
++int hypercall_xlat_continuation(unsigned int *id, unsigned int nr,
++ unsigned int mask, ...)
+ {
+ int rc = 0;
+ struct mc_state *mcs = &current->mc_state;
+@@ -1930,7 +1931,10 @@ int hypercall_xlat_continuation(unsigned
+ unsigned long nval = 0;
+ va_list args;
+
+- BUG_ON(id && *id > 5);
++ ASSERT(nr <= ARRAY_SIZE(mcs->call.args));
++ ASSERT(!(mask >> nr));
++
++ BUG_ON(id && *id >= nr);
+ BUG_ON(id && (mask & (1U << *id)));
+
+ va_start(args, mask);
+@@ -1939,7 +1943,7 @@ int hypercall_xlat_continuation(unsigned
+ {
+ if ( !test_bit(_MCSF_call_preempted, &mcs->flags) )
+ return 0;
+- for ( i = 0; i < 6; ++i, mask >>= 1 )
++ for ( i = 0; i < nr; ++i, mask >>= 1 )
+ {
+ if ( mask & 1 )
+ {
+@@ -1967,7 +1971,7 @@ int hypercall_xlat_continuation(unsigned
+ else
+ {
+ regs = guest_cpu_user_regs();
+- for ( i = 0; i < 6; ++i, mask >>= 1 )
++ for ( i = 0; i < nr; ++i, mask >>= 1 )
+ {
+ unsigned long *reg;
+
+--- xen/common/compat/memory.c.orig
++++ xen/common/compat/memory.c
+@@ -208,7 +208,7 @@ int compat_memory_op(unsigned int cmd, X
+ break;
+
+ cmd = 0;
+- if ( hypercall_xlat_continuation(&cmd, 0x02, nat.hnd, compat) )
++ if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, compat) )
+ {
+ BUG_ON(rc != __HYPERVISOR_memory_op);
+ BUG_ON((cmd & MEMOP_CMD_MASK) != op);
+--- xen/include/xen/compat.h.orig 2013-09-10 08:42:18.000000000 +0200
++++ xen/include/xen/compat.h 2014-11-27 15:29:34.000000000 +0100
+@@ -185,7 +185,8 @@
+ CHECK_FIELD_COMMON_(k, CHECK_NAME_(k, n ## __ ## f1 ## __ ## f2 ## __ ## \
+ f3, F2), n, f1.f2.f3)
+
+-int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...);
++int hypercall_xlat_continuation(unsigned int *id, unsigned int nr,
++ unsigned int mask, ...);
+
+ /* In-place translation functons: */
+ struct start_info;
+--- xen/arch/x86/x86_64/compat/mm.c.orig 2013-09-10 08:42:18.000000000 +0200
++++ xen/arch/x86/x86_64/compat/mm.c 2014-11-27 15:21:15.000000000 +0100
+@@ -128,7 +128,7 @@
+ break;
+
+ if ( rc == __HYPERVISOR_memory_op )
+- hypercall_xlat_continuation(NULL, 0x2, nat, arg);
++ hypercall_xlat_continuation(NULL, 2, 0x2, nat, arg);
+
+ XLAT_pod_target(&cmp, nat);
+
+@@ -333,7 +333,7 @@
+ left = 1;
+ if ( arg1 != MMU_UPDATE_PREEMPTED )
+ {
+- BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops,
++ BUG_ON(!hypercall_xlat_continuation(&left, 4, 0x01, nat_ops,
+ cmp_uops));
+ if ( !test_bit(_MCSF_in_multicall, &mcs->flags) )
+ regs->_ecx += count - i;
+@@ -341,7 +341,7 @@
+ mcs->compat_call.args[1] += count - i;
+ }
+ else
+- BUG_ON(hypercall_xlat_continuation(&left, 0));
++ BUG_ON(hypercall_xlat_continuation(&left, 4, 0));
+ BUG_ON(left != arg1);
+ }
+ else
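
In essence, XSA-111 bounds the translation loop by the hypercall's true argument count so that stale high halves in registers beyond it can no longer trip the sanity checks. A hedged standalone sketch of that shape (assumed names; the real code translates pointer arguments via va_arg rather than truncating):

#include <assert.h>
#include <stdint.h>

#define MAX_HCALL_ARGS 6

static void xlat_continuation_sketch(uint64_t regs[MAX_HCALL_ARGS],
                                     unsigned int nr, unsigned int mask)
{
    assert(nr <= MAX_HCALL_ARGS);
    assert(!(mask >> nr));  /* mask may only name actual arguments */

    /* Walk only the 'nr' real arguments; registers past them may hold
     * guest-controlled garbage in their high halves. */
    for (unsigned int i = 0; i < nr; ++i, mask >>= 1)
        if (mask & 1)
            regs[i] = (uint32_t)regs[i];  /* stand-in for translation */
}
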
diff --git a/sysutils/xenkernel41/patches/patch-CVE-2014-8867 b/sysutils/xenkernel41/patches/patch-CVE-2014-8867
new file mode 100644
index 00000000000..bc2007d2cd4
--- /dev/null
+++ b/sysutils/xenkernel41/patches/patch-CVE-2014-8867
@@ -0,0 +1,90 @@
+$NetBSD: patch-CVE-2014-8867,v 1.1 2014/11/27 15:36:02 bouyer Exp $
+
+x86/HVM: confine internally handled MMIO to solitary regions
+
+While it is generally wrong to cross region boundaries when dealing
+with MMIO accesses of repeated string instructions (currently only
+MOVS) as that would do things a guest doesn't expect (leaving aside
+that none of these regions would normally be accessed with repeated
+string instructions in the first place), this is even more of a problem
+for all virtual MSI-X page accesses (both msixtbl_{read,write}() can be
+made dereference NULL "entry" pointers this way) as well as undersized
+(1- or 2-byte) LAPIC writes (causing vlapic_read_aligned() to access
+space beyond the one memory page set up for holding LAPIC register
+values).
+
+Since those functions validly assume to be called only with addresses
+their respective checking functions indicated to be okay, it is generic
+code that needs to be fixed to clip the repetition count.
+
+To be on the safe side (and consistent), also do the same for buffered
+I/O intercepts, even if their only client (stdvga) doesn't put the
+hypervisor at risk (i.e. "only" guest misbehavior would result).
+
+This is CVE-2014-8867 / XSA-112.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/hvm/intercept.c.orig
++++ xen/arch/x86/hvm/intercept.c
+@@ -131,11 +131,24 @@ int hvm_mmio_intercept(ioreq_t *p)
+ int i;
+
+ for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
+- if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) )
++ {
++ hvm_mmio_check_t check_handler =
++ hvm_mmio_handlers[i]->check_handler;
++
++ if ( check_handler(v, p->addr) )
++ {
++ if ( unlikely(p->count > 1) &&
++ !check_handler(v, unlikely(p->df)
++ ? p->addr - (p->count - 1LL) * p->size
++ : p->addr + (p->count - 1LL) * p->size) )
++ p->count = 1;
++
+ return hvm_mmio_access(
+ v, p,
+ hvm_mmio_handlers[i]->read_handler,
+ hvm_mmio_handlers[i]->write_handler);
++ }
++ }
+
+ return X86EMUL_UNHANDLEABLE;
+ }
+@@ -243,6 +256,13 @@ int hvm_io_intercept(ioreq_t *p, int typ
+ if ( type == HVM_PORTIO )
+ return process_portio_intercept(
+ handler->hdl_list[i].action.portio, p);
++
++ if ( unlikely(p->count > 1) &&
++ (unlikely(p->df)
++ ? p->addr - (p->count - 1LL) * p->size < addr
++ : p->addr + p->count * 1LL * p->size - 1 >= addr + size) )
++ p->count = 1;
++
+ return handler->hdl_list[i].action.mmio(p);
+ }
+ }
+--- xen/arch/x86/hvm/vmsi.c.orig
++++ xen/arch/x86/hvm/vmsi.c
+@@ -236,6 +236,8 @@ static int msixtbl_read(
+ rcu_read_lock(&msixtbl_rcu_lock);
+
+ entry = msixtbl_find_entry(v, address);
++ if ( !entry )
++ goto out;
+ offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
+
+ if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
+@@ -278,6 +280,8 @@ static int msixtbl_write(struct vcpu *v,
+ rcu_read_lock(&msixtbl_rcu_lock);
+
+ entry = msixtbl_find_entry(v, address);
++ if ( !entry )
++ goto out;
+ nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
+
+ offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
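
The clipping logic XSA-112 adds to both dispatch paths can be shown in isolation; a hedged standalone sketch (assumed ioreq layout, not Xen's struct): if the final repetition of a string access would fall outside the region that accepted the first one, only a single iteration is processed and the remainder is left to re-fault.

#include <stdbool.h>
#include <stdint.h>

struct ioreq_sketch {
    uint64_t addr;   /* address of the first access */
    uint32_t size;   /* bytes per repetition */
    uint32_t count;  /* number of repetitions */
    bool df;         /* direction flag: true = descending */
};

typedef bool (*region_check_t)(uint64_t addr);

static void clip_rep_count(struct ioreq_sketch *p, region_check_t in_region)
{
    if (p->count > 1) {
        uint64_t last = p->df
            ? p->addr - (uint64_t)(p->count - 1) * p->size
            : p->addr + (uint64_t)(p->count - 1) * p->size;
        if (!in_region(last))
            p->count = 1;  /* emulate one rep; the rest is retried */
    }
}
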
diff --git a/sysutils/xenkernel41/patches/patch-CVE-2014-9030 b/sysutils/xenkernel41/patches/patch-CVE-2014-9030
new file mode 100644
index 00000000000..9c7b44708ae
--- /dev/null
+++ b/sysutils/xenkernel41/patches/patch-CVE-2014-9030
@@ -0,0 +1,46 @@
+$NetBSD: patch-CVE-2014-9030,v 1.1 2014/11/27 15:36:02 bouyer Exp $
+
+x86/mm: fix a reference counting error in MMU_MACHPHYS_UPDATE
+
+Any domain which can pass the XSM check against a translated guest can cause a
+page reference to be leaked.
+
+While shuffling the order of checks, drop the quite-pointless MEM_LOG(). This
+brings the check in line with similar checks in the vicinity.
+
+Discovered while reviewing the XSA-109/110 followup series.
+
+This is XSA-113.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/mm.c.orig 2014-11-27 15:21:15.000000000 +0100
++++ xen/arch/x86/mm.c 2014-11-27 15:37:25.000000000 +0100
+@@ -3888,6 +3892,12 @@
+
+ case MMU_MACHPHYS_UPDATE:
+
++ if ( unlikely(paging_mode_translate(pg_owner)) )
++ {
++ rc = -EINVAL;
++ break;
++ }
++
+ mfn = req.ptr >> PAGE_SHIFT;
+ gpfn = req.val;
+
+@@ -3901,12 +3911,6 @@
+ break;
+ }
+
+- if ( unlikely(paging_mode_translate(pg_owner)) )
+- {
+- MEM_LOG("Mach-phys update on auto-translate guest");
+- break;
+- }
+-
+ set_gpfn_from_mfn(mfn, gpfn);
+ okay = 1;
+