author     bouyer <bouyer@pkgsrc.org>  2014-11-27 15:20:31 +0000
committer  bouyer <bouyer@pkgsrc.org>  2014-11-27 15:20:31 +0000
commit     1de0751db09b5ad9c00d0906305d52ce885c2be4 (patch)
tree       f1c7cb4644df1898fc4158e1cc3384fe92c140ab /sysutils/xenkernel42
parent     c5e1ea04c6a371c30bc750f8bba6c09fd1e81b41 (diff)
download   pkgsrc-1de0751db09b5ad9c00d0906305d52ce885c2be4.tar.gz
Apply patch from Xen advisory:
CVE-2014-8594 / XSA-109: x86: don't allow page table updates on non-PV
page tables in do_mmu_update(), fixing: Malicious or buggy stub domain
kernels or tool stacks otherwise living outside of Domain0 can mount a
denial of service attack which, if successful, can affect the whole
system.

CVE-2014-8595 / XSA-110: x86emul: enforce privilege level restrictions
when loading CS, fixing: Malicious HVM guest user mode code may be able
to elevate its privileges to guest supervisor mode, or to crash the
guest.

CVE-2014-8866 / XSA-111: x86: limit checks in
hypercall_xlat_continuation() to actual arguments, fixing: A buggy or
malicious HVM guest can crash the host.

CVE-2014-8867 / XSA-112: x86/HVM: confine internally handled MMIO to
solitary regions, fixing: A buggy or malicious HVM guest can crash the
host.

CVE-2014-9030 / XSA-113: x86/mm: fix a reference counting error in
MMU_MACHPHYS_UPDATE, fixing: Malicious or buggy stub domain kernels or
tool stacks otherwise living outside of Domain0 can mount a denial of
service attack which, if successful, can affect the whole system.
Diffstat (limited to 'sysutils/xenkernel42')
-rw-r--r--  sysutils/xenkernel42/Makefile                    |   4
-rw-r--r--  sysutils/xenkernel42/distinfo                    |   7
-rw-r--r--  sysutils/xenkernel42/patches/patch-CVE-2014-8594 |  27
-rw-r--r--  sysutils/xenkernel42/patches/patch-CVE-2014-8595 | 158
-rw-r--r--  sysutils/xenkernel42/patches/patch-CVE-2014-8866 | 133
-rw-r--r--  sysutils/xenkernel42/patches/patch-CVE-2014-8867 |  90
-rw-r--r--  sysutils/xenkernel42/patches/patch-CVE-2014-9030 |  47
7 files changed, 463 insertions(+), 3 deletions(-)
diff --git a/sysutils/xenkernel42/Makefile b/sysutils/xenkernel42/Makefile
index 5f002bfe2d4..38746cbb461 100644
--- a/sysutils/xenkernel42/Makefile
+++ b/sysutils/xenkernel42/Makefile
@@ -1,9 +1,9 @@
-# $NetBSD: Makefile,v 1.9 2014/10/01 17:34:54 bouyer Exp $
+# $NetBSD: Makefile,v 1.10 2014/11/27 15:20:31 bouyer Exp $
VERSION= 4.2.5
DISTNAME= xen-${VERSION}
PKGNAME= xenkernel42-${VERSION}
-PKGREVISION= 1
+PKGREVISION= 2
CATEGORIES= sysutils
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${VERSION}/
diff --git a/sysutils/xenkernel42/distinfo b/sysutils/xenkernel42/distinfo
index 625754dcec4..a9b3e043caf 100644
--- a/sysutils/xenkernel42/distinfo
+++ b/sysutils/xenkernel42/distinfo
@@ -1,8 +1,13 @@
-$NetBSD: distinfo,v 1.7 2014/10/01 17:34:54 bouyer Exp $
+$NetBSD: distinfo,v 1.8 2014/11/27 15:20:31 bouyer Exp $
SHA1 (xen-4.2.5.tar.gz) = f42741e4ec174495ace70c4b17a6b9b0e60e798a
RMD160 (xen-4.2.5.tar.gz) = 7d4f7f1b32ee541d341a756b1f8da02816438d19
Size (xen-4.2.5.tar.gz) = 15671925 bytes
+SHA1 (patch-CVE-2014-8594) = 8599e5007e8f15eddc1385aa1d90accf1690952e
+SHA1 (patch-CVE-2014-8595) = 46bd285b7eb8f2e23984f7917b12af2191bfef80
+SHA1 (patch-CVE-2014-8866) = 9888e9585364681dfaa43af953eb104715cc4f99
+SHA1 (patch-CVE-2014-8867) = 576433746660f62b753088a66c5315a1a2ff8f76
+SHA1 (patch-CVE-2014-9030) = f4646ab2b0d01ad2a3bf47839fe0ffd35479b4a6
SHA1 (patch-Config.mk) = a43ed1b3304d6383dc093acd128a7f373d0ca266
SHA1 (patch-xen_Makefile) = e0d1b74518b9675ddc64295d1523ded9a8757c0a
SHA1 (patch-xen_arch_x86_Rules.mk) = 6b9b4bfa28924f7d3f6c793a389f1a7ac9d228e2
diff --git a/sysutils/xenkernel42/patches/patch-CVE-2014-8594 b/sysutils/xenkernel42/patches/patch-CVE-2014-8594
new file mode 100644
index 00000000000..4c59c498ca3
--- /dev/null
+++ b/sysutils/xenkernel42/patches/patch-CVE-2014-8594
@@ -0,0 +1,27 @@
+$NetBSD: patch-CVE-2014-8594,v 1.1 2014/11/27 15:20:31 bouyer Exp $
+
+x86: don't allow page table updates on non-PV page tables in do_mmu_update()
+
+paging_write_guest_entry() and paging_cmpxchg_guest_entry() aren't
+consistently supported for non-PV guests (they'd deref NULL for PVH or
+non-HAP HVM ones). Don't allow respective MMU_* operations on the
+page tables of such domains.
+
+This is XSA-109.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/mm.c.orig
++++ xen/arch/x86/mm.c
+@@ -3800,6 +3800,10 @@ long do_mmu_update(
+ {
+ p2m_type_t p2mt;
+
++ rc = -EOPNOTSUPP;
++ if ( unlikely(paging_mode_refcounts(pt_owner)) )
++ break;
++
+ rc = xsm_mmu_normal_update(d, pt_owner, pg_owner, req.val);
+ if ( rc )
+ break;
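
The hunk above is the entire XSA-109 fix: requests are refused with
-EOPNOTSUPP before any page-table walk whenever the owning domain's page
tables are managed by the paging code (paging_mode_refcounts()), since
paging_write_guest_entry()/paging_cmpxchg_guest_entry() would dereference
NULL for such domains. A minimal standalone sketch of that guard, with
names of our own (struct domain, mmu_normal_update) rather than Xen's:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for Xen's struct domain; the flag plays the role of
 * paging_mode_refcounts(pt_owner). */
struct domain {
    bool paging_refcounts;
};

static int mmu_normal_update(const struct domain *pt_owner)
{
    /* Refuse up front, before touching any page table. */
    if (pt_owner->paging_refcounts)
        return -EOPNOTSUPP;
    /* ... the PV page-table update would happen here ... */
    return 0;
}

int main(void)
{
    struct domain pv  = { .paging_refcounts = false };
    struct domain hvm = { .paging_refcounts = true };

    printf("PV domain:  %d\n", mmu_normal_update(&pv));  /* 0 */
    printf("HVM domain: %d\n", mmu_normal_update(&hvm)); /* -EOPNOTSUPP */
    return 0;
}
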
diff --git a/sysutils/xenkernel42/patches/patch-CVE-2014-8595 b/sysutils/xenkernel42/patches/patch-CVE-2014-8595
new file mode 100644
index 00000000000..b782c9adc90
--- /dev/null
+++ b/sysutils/xenkernel42/patches/patch-CVE-2014-8595
@@ -0,0 +1,158 @@
+$NetBSD: patch-CVE-2014-8595,v 1.1 2014/11/27 15:20:31 bouyer Exp $
+
+x86emul: enforce privilege level restrictions when loading CS
+
+Privilege level checks were basically missing for the CS case, the
+only check that was done (RPL == DPL for nonconforming segments)
+was solely covering a single special case (return to non-conforming
+segment).
+
+Additionally in long mode the L bit set requires the D bit to be clear,
+as was recently pointed out for KVM by Nadav Amit
+<namit@cs.technion.ac.il>.
+
+Finally we also need to force the loaded selector's RPL to CPL (at
+least as long as lret/retf emulation doesn't support privilege level
+changes).
+
+This is XSA-110.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/x86_emulate/x86_emulate.c.orig
++++ xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1107,7 +1107,7 @@ realmode_load_seg(
+ static int
+ protmode_load_seg(
+ enum x86_segment seg,
+- uint16_t sel,
++ uint16_t sel, bool_t is_ret,
+ struct x86_emulate_ctxt *ctxt,
+ const struct x86_emulate_ops *ops)
+ {
+@@ -1179,9 +1179,23 @@ protmode_load_seg(
+ /* Code segment? */
+ if ( !(desc.b & (1u<<11)) )
+ goto raise_exn;
+- /* Non-conforming segment: check DPL against RPL. */
+- if ( ((desc.b & (6u<<9)) != (6u<<9)) && (dpl != rpl) )
++ if ( is_ret
++ ? /*
++ * Really rpl < cpl, but our sole caller doesn't handle
++ * privilege level changes.
++ */
++ rpl != cpl || (desc.b & (1 << 10) ? dpl > rpl : dpl != rpl)
++ : desc.b & (1 << 10)
++ /* Conforming segment: check DPL against CPL. */
++ ? dpl > cpl
++ /* Non-conforming segment: check RPL and DPL against CPL. */
++ : rpl > cpl || dpl != cpl )
+ goto raise_exn;
++ /* 64-bit code segments (L bit set) must have D bit clear. */
++ if ( in_longmode(ctxt, ops) &&
++ (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
++ goto raise_exn;
++ sel = (sel ^ rpl) | cpl;
+ break;
+ case x86_seg_ss:
+ /* Writable data segment? */
+@@ -1246,7 +1260,7 @@ protmode_load_seg(
+ static int
+ load_seg(
+ enum x86_segment seg,
+- uint16_t sel,
++ uint16_t sel, bool_t is_ret,
+ struct x86_emulate_ctxt *ctxt,
+ const struct x86_emulate_ops *ops)
+ {
+@@ -1255,7 +1269,7 @@ load_seg(
+ return X86EMUL_UNHANDLEABLE;
+
+ if ( in_protmode(ctxt, ops) )
+- return protmode_load_seg(seg, sel, ctxt, ops);
++ return protmode_load_seg(seg, sel, is_ret, ctxt, ops);
+
+ return realmode_load_seg(seg, sel, ctxt, ops);
+ }
+@@ -1852,7 +1866,7 @@ x86_emulate(
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &dst.val, op_bytes, ctxt, ops)) != 0 )
+ goto done;
+- if ( (rc = load_seg(src.val, (uint16_t)dst.val, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(src.val, dst.val, 0, ctxt, ops)) != 0 )
+ return rc;
+ break;
+
+@@ -2222,7 +2236,7 @@ x86_emulate(
+ enum x86_segment seg = decode_segment(modrm_reg);
+ generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
+ generate_exception_if(seg == x86_seg_cs, EXC_UD, -1);
+- if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(seg, src.val, 0, ctxt, ops)) != 0 )
+ goto done;
+ if ( seg == x86_seg_ss )
+ ctxt->retire.flags.mov_ss = 1;
+@@ -2303,7 +2317,7 @@ x86_emulate(
+ &_regs.eip, op_bytes, ctxt)) )
+ goto done;
+
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = eip;
+ break;
+@@ -2526,7 +2540,7 @@ x86_emulate(
+ if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
+ &sel, 2, ctxt, ops)) != 0 )
+ goto done;
+- if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(dst.val, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ dst.val = src.val;
+ break;
+@@ -2600,7 +2614,7 @@ x86_emulate(
+ &dst.val, op_bytes, ctxt, ops)) ||
+ (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
+ &src.val, op_bytes, ctxt, ops)) ||
+- (rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) )
++ (rc = load_seg(x86_seg_cs, src.val, 1, ctxt, ops)) )
+ goto done;
+ _regs.eip = dst.val;
+ break;
+@@ -2647,7 +2661,7 @@ x86_emulate(
+ _regs.eflags &= mask;
+ _regs.eflags |= (uint32_t)(eflags & ~mask) | 0x02;
+ _regs.eip = eip;
+- if ( (rc = load_seg(x86_seg_cs, (uint16_t)cs, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, cs, 1, ctxt, ops)) != 0 )
+ goto done;
+ break;
+ }
+@@ -3277,7 +3291,7 @@ x86_emulate(
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
+ eip = insn_fetch_bytes(op_bytes);
+ sel = insn_fetch_type(uint16_t);
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = eip;
+ break;
+@@ -3590,7 +3604,7 @@ x86_emulate(
+ goto done;
+ }
+
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = dst.val;
+
+@@ -3671,7 +3685,7 @@ x86_emulate(
+ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
+ if ( (rc = load_seg((modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr,
+- src.val, ctxt, ops)) != 0 )
++ src.val, 0, ctxt, ops)) != 0 )
+ goto done;
+ break;
+
diff --git a/sysutils/xenkernel42/patches/patch-CVE-2014-8866 b/sysutils/xenkernel42/patches/patch-CVE-2014-8866
new file mode 100644
index 00000000000..1fb0b45cd3a
--- /dev/null
+++ b/sysutils/xenkernel42/patches/patch-CVE-2014-8866
@@ -0,0 +1,133 @@
+$NetBSD: patch-CVE-2014-8866,v 1.1 2014/11/27 15:20:31 bouyer Exp $
+
+x86: limit checks in hypercall_xlat_continuation() to actual arguments
+
+HVM/PVH guests can otherwise trigger the final BUG_ON() in that
+function by entering 64-bit mode, setting the high halves of affected
+registers to non-zero values, leaving 64-bit mode, and issuing a
+hypercall that might get preempted and hence become subject to
+continuation argument translation (HYPERVISOR_memory_op being the only
+one possible for HVM, PVH also having the option of using
+HYPERVISOR_mmuext_op). This issue got introduced when HVM code was
+switched to use compat_memory_op() - neither that nor
+hypercall_xlat_continuation() were originally intended to be used by
+other than PV guests (which can't enter 64-bit mode and hence have no
+way to alter the high halves of 64-bit registers).
+
+This is XSA-111.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/domain.c.orig
++++ xen/arch/x86/domain.c
+@@ -1921,7 +1921,8 @@ unsigned long hypercall_create_continuat
+ }
+
+ #ifdef CONFIG_COMPAT
+-int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...)
++int hypercall_xlat_continuation(unsigned int *id, unsigned int nr,
++ unsigned int mask, ...)
+ {
+ int rc = 0;
+ struct mc_state *mcs = &current->mc_state;
+@@ -1930,7 +1931,10 @@ int hypercall_xlat_continuation(unsigned
+ unsigned long nval = 0;
+ va_list args;
+
+- BUG_ON(id && *id > 5);
++ ASSERT(nr <= ARRAY_SIZE(mcs->call.args));
++ ASSERT(!(mask >> nr));
++
++ BUG_ON(id && *id >= nr);
+ BUG_ON(id && (mask & (1U << *id)));
+
+ va_start(args, mask);
+@@ -1939,7 +1943,7 @@ int hypercall_xlat_continuation(unsigned
+ {
+ if ( !test_bit(_MCSF_call_preempted, &mcs->flags) )
+ return 0;
+- for ( i = 0; i < 6; ++i, mask >>= 1 )
++ for ( i = 0; i < nr; ++i, mask >>= 1 )
+ {
+ if ( mask & 1 )
+ {
+@@ -1967,7 +1971,7 @@ int hypercall_xlat_continuation(unsigned
+ else
+ {
+ regs = guest_cpu_user_regs();
+- for ( i = 0; i < 6; ++i, mask >>= 1 )
++ for ( i = 0; i < nr; ++i, mask >>= 1 )
+ {
+ unsigned long *reg;
+
+--- xen/arch/x86/x86_64/compat/mm.c.orig
++++ xen/arch/x86/x86_64/compat/mm.c
+@@ -78,7 +78,7 @@ int compat_arch_memory_op(int op, XEN_GU
+ }
+
+ if ( rc == __HYPERVISOR_memory_op )
+- hypercall_xlat_continuation(NULL, 0x2, nat, arg);
++ hypercall_xlat_continuation(NULL, 2, 0x2, nat, arg);
+
+ break;
+ }
+@@ -144,7 +144,7 @@ int compat_arch_memory_op(int op, XEN_GU
+ break;
+
+ if ( rc == __HYPERVISOR_memory_op )
+- hypercall_xlat_continuation(NULL, 0x2, nat, arg);
++ hypercall_xlat_continuation(NULL, 2, 0x2, nat, arg);
+
+ XLAT_pod_target(&cmp, nat);
+
+@@ -379,7 +379,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
+ left = 1;
+ if ( arg1 != MMU_UPDATE_PREEMPTED )
+ {
+- BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops,
++ BUG_ON(!hypercall_xlat_continuation(&left, 4, 0x01, nat_ops,
+ cmp_uops));
+ if ( !test_bit(_MCSF_in_multicall, &mcs->flags) )
+ regs->_ecx += count - i;
+@@ -387,7 +387,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
+ mcs->compat_call.args[1] += count - i;
+ }
+ else
+- BUG_ON(hypercall_xlat_continuation(&left, 0));
++ BUG_ON(hypercall_xlat_continuation(&left, 4, 0));
+ BUG_ON(left != arg1);
+ }
+ else
+--- xen/common/compat/memory.c.orig
++++ xen/common/compat/memory.c
+@@ -208,7 +208,7 @@ int compat_memory_op(unsigned int cmd, X
+ break;
+
+ cmd = 0;
+- if ( hypercall_xlat_continuation(&cmd, 0x02, nat.hnd, compat) )
++ if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, compat) )
+ {
+ BUG_ON(rc != __HYPERVISOR_memory_op);
+ BUG_ON((cmd & MEMOP_CMD_MASK) != op);
+--- xen/include/xen/compat.h.orig
++++ xen/include/xen/compat.h
+@@ -192,6 +192,8 @@ static inline int name(k xen_ ## n *x, k
+ * This option is useful for extracting the "op" argument or similar from the
+ * hypercall to enable further xlat processing.
+ *
++ * nr: Total number of arguments the hypercall has.
++ *
+ * mask: Specifies which of the hypercall arguments require compat translation.
+ * bit 0 indicates that the 0'th argument requires translation, bit 1 indicates
+ * that the first argument requires translation and so on. Native and compat
+@@ -211,7 +213,8 @@ static inline int name(k xen_ ## n *x, k
+ *
+ * Return: Number of arguments which were actually translated.
+ */
+-int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...);
++int hypercall_xlat_continuation(unsigned int *id, unsigned int nr,
++ unsigned int mask, ...);
+
+ /* In-place translation functons: */
+ struct start_info;
diff --git a/sysutils/xenkernel42/patches/patch-CVE-2014-8867 b/sysutils/xenkernel42/patches/patch-CVE-2014-8867
new file mode 100644
index 00000000000..9c239f04b92
--- /dev/null
+++ b/sysutils/xenkernel42/patches/patch-CVE-2014-8867
@@ -0,0 +1,90 @@
+$NetBSD: patch-CVE-2014-8867,v 1.1 2014/11/27 15:20:31 bouyer Exp $
+
+x86/HVM: confine internally handled MMIO to solitary regions
+
+While it is generally wrong to cross region boundaries when dealing
+with MMIO accesses of repeated string instructions (currently only
+MOVS) as that would do things a guest doesn't expect (leaving aside
+that none of these regions would normally be accessed with repeated
+string instructions in the first place), this is even more of a problem
+for all virtual MSI-X page accesses (both msixtbl_{read,write}() can be
+made dereference NULL "entry" pointers this way) as well as undersized
+(1- or 2-byte) LAPIC writes (causing vlapic_read_aligned() to access
+space beyond the one memory page set up for holding LAPIC register
+values).
+
+Since those functions validly assume to be called only with addresses
+their respective checking functions indicated to be okay, it is generic
+code that needs to be fixed to clip the repetition count.
+
+To be on the safe side (and consistent), also do the same for buffered
+I/O intercepts, even if their only client (stdvga) doesn't put the
+hypervisor at risk (i.e. "only" guest misbehavior would result).
+
+This is CVE-2014-8867 / XSA-112.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/hvm/intercept.c.orig
++++ xen/arch/x86/hvm/intercept.c
+@@ -131,11 +131,24 @@ int hvm_mmio_intercept(ioreq_t *p)
+ int i;
+
+ for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
+- if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) )
++ {
++ hvm_mmio_check_t check_handler =
++ hvm_mmio_handlers[i]->check_handler;
++
++ if ( check_handler(v, p->addr) )
++ {
++ if ( unlikely(p->count > 1) &&
++ !check_handler(v, unlikely(p->df)
++ ? p->addr - (p->count - 1LL) * p->size
++ : p->addr + (p->count - 1LL) * p->size) )
++ p->count = 1;
++
+ return hvm_mmio_access(
+ v, p,
+ hvm_mmio_handlers[i]->read_handler,
+ hvm_mmio_handlers[i]->write_handler);
++ }
++ }
+
+ return X86EMUL_UNHANDLEABLE;
+ }
+@@ -243,6 +256,13 @@ int hvm_io_intercept(ioreq_t *p, int typ
+ if ( type == HVM_PORTIO )
+ return process_portio_intercept(
+ handler->hdl_list[i].action.portio, p);
++
++ if ( unlikely(p->count > 1) &&
++ (unlikely(p->df)
++ ? p->addr - (p->count - 1LL) * p->size < addr
++ : p->addr + p->count * 1LL * p->size - 1 >= addr + size) )
++ p->count = 1;
++
+ return handler->hdl_list[i].action.mmio(p);
+ }
+ }
+--- xen/arch/x86/hvm/vmsi.c.orig
++++ xen/arch/x86/hvm/vmsi.c
+@@ -236,6 +236,8 @@ static int msixtbl_read(
+ rcu_read_lock(&msixtbl_rcu_lock);
+
+ entry = msixtbl_find_entry(v, address);
++ if ( !entry )
++ goto out;
+ offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
+
+ if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
+@@ -278,6 +280,8 @@ static int msixtbl_write(struct vcpu *v,
+ rcu_read_lock(&msixtbl_rcu_lock);
+
+ entry = msixtbl_find_entry(v, address);
++ if ( !entry )
++ goto out;
+ nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
+
+ offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
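
Both XSA-112 intercept hunks enforce the same rule: before handing a
repeated (REP-prefixed) access to an internal handler, check that the
final repetition still lies in territory the region check accepted; if
not, clip the count to 1 so the handler only ever sees addresses it
validated, and let the emulator retry the remainder. A standalone sketch
of the clipping, using an ioreq-like struct of our own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ioreq {
    uint64_t addr;   /* address of the first repetition */
    uint32_t size;   /* bytes per repetition */
    uint32_t count;  /* number of repetitions */
    bool df;         /* direction flag: true = descending addresses */
};

static bool in_region(uint64_t addr, uint64_t base, uint64_t len)
{
    return addr >= base && addr < base + len;
}

static void clip_rep_count(struct ioreq *p, uint64_t base, uint64_t len)
{
    uint64_t last = p->df ? p->addr - (uint64_t)(p->count - 1) * p->size
                          : p->addr + (uint64_t)(p->count - 1) * p->size;

    if (p->count > 1 && !in_region(last, base, len))
        p->count = 1;  /* only the validated first access goes through */
}

int main(void)
{
    /* 16 dword repetitions starting 8 bytes before the end of a
     * 256-byte region: the third repetition already falls outside. */
    struct ioreq p = { .addr = 0xfee000f8, .size = 4, .count = 16 };

    clip_rep_count(&p, 0xfee00000, 0x100);
    printf("count clipped to %u\n", p.count);  /* 1 */
    return 0;
}

This is also why msixtbl_read()/msixtbl_write() above gain NULL checks:
with clipping in place they should no longer see out-of-range addresses,
but the checks make that assumption explicit.
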
diff --git a/sysutils/xenkernel42/patches/patch-CVE-2014-9030 b/sysutils/xenkernel42/patches/patch-CVE-2014-9030
new file mode 100644
index 00000000000..6d48213b40d
--- /dev/null
+++ b/sysutils/xenkernel42/patches/patch-CVE-2014-9030
@@ -0,0 +1,47 @@
+$NetBSD: patch-CVE-2014-9030,v 1.1 2014/11/27 15:20:31 bouyer Exp $
+
+x86/mm: fix a reference counting error in MMU_MACHPHYS_UPDATE
+
+Any domain which can pass the XSM check against a translated guest can cause a
+page reference to be leaked.
+
+While shuffling the order of checks, drop the quite-pointless MEM_LOG(). This
+brings the check in line with similar checks in the vicinity.
+
+Discovered while reviewing the XSA-109/110 followup series.
+
+This is XSA-113.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- xen/arch/x86/mm.c.orig
++++ xen/arch/x86/mm.c
+@@ -3619,6 +3619,12 @@ long do_mmu_update(
+
+ case MMU_MACHPHYS_UPDATE:
+
++ if ( unlikely(paging_mode_translate(pg_owner)) )
++ {
++ rc = -EINVAL;
++ break;
++ }
++
+ mfn = req.ptr >> PAGE_SHIFT;
+ gpfn = req.val;
+
+@@ -3638,13 +3644,6 @@ long do_mmu_update(
+ break;
+ }
+
+- if ( unlikely(paging_mode_translate(pg_owner)) )
+- {
+- MEM_LOG("Mach-phys update on auto-translate guest");
+- rc = -EINVAL;
+- break;
+- }
+-
+ set_gpfn_from_mfn(mfn, gpfn);
+
+ paging_mark_dirty(pg_owner, mfn);
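
The XSA-113 change is purely a reordering: the paging_mode_translate()
check must run before the MFN reference is taken, because the early
break in the old placement returned without dropping that reference,
leaking it. A toy refcount model (all names ours) showing the fixed
ordering:

#include <stdbool.h>
#include <stdio.h>

struct page {
    unsigned int refcount;
};

static bool get_page(struct page *pg) { ++pg->refcount; return true; }
static void put_page(struct page *pg) { --pg->refcount; }

/* Fixed ordering: reject translated guests before get_page(), so the
 * error path cannot leak a reference. In the buggy version the
 * translate check sat between get_page() and put_page() and broke out
 * without the put. */
static int machphys_update(struct page *pg, bool paging_translate)
{
    if (paging_translate)
        return -1;  /* -EINVAL in the real code; no reference taken */

    if (!get_page(pg))
        return -1;
    /* ... set_gpfn_from_mfn() and paging_mark_dirty() go here ... */
    put_page(pg);
    return 0;
}

int main(void)
{
    struct page pg = { .refcount = 0 };

    machphys_update(&pg, true);
    printf("refcount after rejected update: %u\n", pg.refcount);  /* 0 */
    return 0;
}
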