diff options
author | miao chen - Sun Microsystems - Beijing China <Miao.Chen@Sun.COM> | 2009-12-05 13:25:40 +0800 |
---|---|---|
committer | miao chen - Sun Microsystems - Beijing China <Miao.Chen@Sun.COM> | 2009-12-05 13:25:40 +0800 |
commit | 0035d21c77a24d02faf34c10aabc120ca692efb5 (patch) | |
tree | 10cfba243ff76ec208d28baf0a4bad8d2ac853c5 /usr/src/uts/intel/io/agpgart | |
parent | c2e5330e09ea2d4fb7299851f5ebf26155c2117f (diff) | |
download | illumos-gate-0035d21c77a24d02faf34c10aabc120ca692efb5.tar.gz |
PSARC 2009/425 Additional ioctls for GEM support in i915 driver
PSARC 2009/474 Additional IOCTL Support in Agpgart Driver
6815826 GEM should be supported in drm driver
6904304 System panic in pci_get_available_prop() in busra.c
Diffstat (limited to 'usr/src/uts/intel/io/agpgart')
-rw-r--r-- | usr/src/uts/intel/io/agpgart/agpgart.c | 264 |
-rw-r--r-- | usr/src/uts/intel/io/agpgart/agptarget.c | 201 |
2 files changed, 460 insertions, 5 deletions
diff --git a/usr/src/uts/intel/io/agpgart/agpgart.c b/usr/src/uts/intel/io/agpgart/agpgart.c index 005dc57ac0..0f7ce74458 100644 --- a/usr/src/uts/intel/io/agpgart/agpgart.c +++ b/usr/src/uts/intel/io/agpgart/agpgart.c @@ -1,5 +1,10 @@ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2009, Intel Corporation. + * All Rights Reserved. + */ + +/* + * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ /* @@ -59,8 +64,55 @@ static void *agpgart_glob_soft_handle; #define IS_TRUE_AGP(type) (((type) == ARC_INTELAGP) || \ ((type) == ARC_AMD64AGP)) +#define AGP_HASH_NODE 1024 + +static void +list_head_init(struct list_head *head) { + struct list_head *entry, *tmp; + /* HASH for accelerate */ + entry = kmem_zalloc(AGP_HASH_NODE * + sizeof (struct list_head), KM_NOSLEEP); + head->next = entry; + for (int i = 0; i < AGP_HASH_NODE; i++) { + tmp = &entry[i]; + tmp->next = tmp; + tmp->prev = tmp; + tmp->gttseg = NULL; + } +} + +static void +list_head_add_new(struct list_head *head, + igd_gtt_seg_t *gttseg) +{ + struct list_head *entry, *tmp; + int key; + entry = kmem_zalloc(sizeof (*entry), KM_NOSLEEP); + key = gttseg->igs_pgstart % AGP_HASH_NODE; + tmp = &head->next[key]; + tmp->next->prev = entry; + entry->next = tmp->next; + entry->prev = tmp; + tmp->next = entry; + entry->gttseg = gttseg; +} + +static void +list_head_del(struct list_head *entry) { + (entry)->next->prev = (entry)->prev; \ + (entry)->prev->next = (entry)->next; \ + (entry)->gttseg = NULL; \ +} + +#define list_head_for_each_safe(entry, temp, head) \ + for (int key = 0; key < AGP_HASH_NODE; key++) \ + for (entry = (&(head)->next[key])->next, temp = (entry)->next; \ + entry != &(head)->next[key]; \ + entry = temp, temp = temp->next) + + #define agpinfo_default_to_32(v, v32) \ - { \ + { \ (v32).agpi32_version = (v).agpi_version; \ (v32).agpi32_devid = (v).agpi_devid; \ (v32).agpi32_mode = (v).agpi_mode; \ @@ -1134,7 +1186,7 @@ 
lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev) * based on the ammount of physical pages. * The algorithm is: compare the aperture size with 1/4 of total * physical pages, and use the smaller one to for the max available - * pages. + * pages. But the minimum video memory should be 192M. * * Arguments: * aper_size system agp aperture size (in MB) @@ -1145,14 +1197,18 @@ lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev) static uint32_t get_max_pages(uint32_t aper_size) { - uint32_t i, j; + uint32_t i, j, size; ASSERT(aper_size <= MAXAPERMEGAS); i = AGP_MB2PAGES(aper_size); j = (physmem >> 2); - return ((i < j) ? i : j); + size = ((i < j) ? i : j); + + if (size < AGP_MB2PAGES(MINAPERMEGAS)) + size = AGP_MB2PAGES(MINAPERMEGAS); + return (size); } /* @@ -2438,6 +2494,8 @@ agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) AGP_MAXKEYS * (sizeof (keytable_ent_t)), KM_SLEEP); + list_head_init(&softstate->mapped_list); + return (DDI_SUCCESS); err4: agp_fini_kstats(softstate); @@ -2484,6 +2542,21 @@ agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) st->asoft_table = 0; } + struct list_head *entry, *temp, *head; + igd_gtt_seg_t *gttseg; + list_head_for_each_safe(entry, temp, &st->mapped_list) { + gttseg = entry->gttseg; + list_head_del(entry); + kmem_free(entry, sizeof (*entry)); + kmem_free(gttseg->igs_phyaddr, + sizeof (uint32_t) * gttseg->igs_npage); + kmem_free(gttseg, sizeof (igd_gtt_seg_t)); + } + head = &st->mapped_list; + kmem_free(head->next, + AGP_HASH_NODE * sizeof (struct list_head)); + head->next = NULL; + ddi_remove_minor_node(dip, AGPGART_DEVNODE); agp_fini_kstats(st); ldi_ident_release(st->asoft_li); @@ -2557,6 +2630,7 @@ agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp) int instance = AGP_DEV2INST(*dev); agpgart_softstate_t *softstate; int rc = 0; + uint32_t devid; if (secpolicy_gart_access(credp)) { AGPDB_PRINT2((CE_WARN, "agpgart_open: permission denied")); @@ -2612,6 +2686,21 @@ agpgart_open(dev_t *dev, int openflags, int 
otyp, cred_t *credp) return (EIO); } + devid = softstate->asoft_info.agpki_mdevid; + if (IS_INTEL_915(devid) || + IS_INTEL_965(devid) || + IS_INTEL_X33(devid) || + IS_INTEL_G4X(devid)) { + rc = ldi_ioctl(softstate->asoft_devreg.agprd_targethdl, + INTEL_CHIPSET_FLUSH_SETUP, 0, FKIOCTL, kcred, 0); + } + if (rc) { + AGPDB_PRINT2((CE_WARN, + "agpgart_open: Intel chipset flush setup error")); + lyr_end(&softstate->asoft_devreg); + mutex_exit(&softstate->asoft_instmutex); + return (EIO); + } mutex_exit(&softstate->asoft_instmutex); return (0); } @@ -2698,6 +2787,8 @@ agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp) { int instance = AGP_DEV2INST(dev); agpgart_softstate_t *softstate; + int rc = 0; + uint32_t devid; softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); if (softstate == NULL) { @@ -2721,6 +2812,19 @@ agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp) release_control(softstate); } + devid = softstate->asoft_info.agpki_mdevid; + if (IS_INTEL_915(devid) || + IS_INTEL_965(devid) || + IS_INTEL_X33(devid) || + IS_INTEL_G4X(devid)) { + rc = ldi_ioctl(softstate->asoft_devreg.agprd_targethdl, + INTEL_CHIPSET_FLUSH_FREE, 0, FKIOCTL, kcred, 0); + } + if (rc) { + AGPDB_PRINT2((CE_WARN, + "agpgart_open: Intel chipset flush free error")); + } + if (lyr_unconfig_devices(&softstate->asoft_devreg)) { AGPDB_PRINT2((CE_WARN, "agpgart_close: lyr_unconfig_device error")); @@ -3020,6 +3124,144 @@ ioctl_agpgart_unbind(agpgart_softstate_t *st, void *arg, int flags) return (0); } +static int +ioctl_agpgart_flush_chipset(agpgart_softstate_t *st) +{ + ldi_handle_t hdl; + uint32_t devid; + int rc = 0; + devid = st->asoft_info.agpki_mdevid; + hdl = st->asoft_devreg.agprd_targethdl; + if (IS_INTEL_915(devid) || + IS_INTEL_965(devid) || + IS_INTEL_X33(devid) || + IS_INTEL_G4X(devid)) { + rc = ldi_ioctl(hdl, INTEL_CHIPSET_FLUSH, 0, FKIOCTL, kcred, 0); + } + return (rc); +} + +static int +ioctl_agpgart_pages_bind(agpgart_softstate_t *st, void *arg, int 
flags) +{ + agp_bind_pages_t bind_info; + uint32_t pg_offset; + int err = 0; + ldi_handle_t hdl; + uint32_t npages; + igd_gtt_seg_t *gttseg; + uint32_t i; + int rval; + if (ddi_copyin(arg, &bind_info, + sizeof (agp_bind_pages_t), flags) != 0) { + return (EFAULT); + } + + gttseg = (igd_gtt_seg_t *)kmem_zalloc(sizeof (igd_gtt_seg_t), + KM_SLEEP); + + pg_offset = bind_info.agpb_pgstart; + + gttseg->igs_pgstart = pg_offset; + npages = (uint32_t)bind_info.agpb_pgcount; + gttseg->igs_npage = npages; + + gttseg->igs_type = AGP_NORMAL; + gttseg->igs_phyaddr = (uint32_t *)kmem_zalloc + (sizeof (uint32_t) * gttseg->igs_npage, KM_SLEEP); + + for (i = 0; i < npages; i++) { + gttseg->igs_phyaddr[i] = bind_info.agpb_pages[i] << + GTT_PAGE_SHIFT; + } + + hdl = st->asoft_devreg.agprd_masterhdl; + if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)gttseg, FKIOCTL, + kcred, &rval)) { + AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: start0x%x", + gttseg->igs_pgstart)); + AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: pages=0x%x", + gttseg->igs_npage)); + AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: type=0x%x", + gttseg->igs_type)); + err = -1; + } + + list_head_add_new(&st->mapped_list, gttseg); + return (err); +} + +static int +ioctl_agpgart_pages_unbind(agpgart_softstate_t *st, void *arg, int flags) +{ + agp_unbind_pages_t unbind_info; + int rval; + ldi_handle_t hdl; + igd_gtt_seg_t *gttseg; + + if (ddi_copyin(arg, &unbind_info, sizeof (unbind_info), flags) != 0) { + return (EFAULT); + } + + struct list_head *entry, *temp; + list_head_for_each_safe(entry, temp, &st->mapped_list) { + if (entry->gttseg->igs_pgstart == unbind_info.agpb_pgstart) { + gttseg = entry->gttseg; + /* not unbind if VT switch */ + if (unbind_info.agpb_type) { + list_head_del(entry); + kmem_free(entry, sizeof (*entry)); + } + break; + } + } + ASSERT(gttseg != NULL); + gttseg->igs_pgstart = unbind_info.agpb_pgstart; + ASSERT(gttseg->igs_npage == unbind_info.agpb_pgcount); + + hdl = 
st->asoft_devreg.agprd_masterhdl; + if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)gttseg, FKIOCTL, + kcred, &rval)) + return (-1); + + if (unbind_info.agpb_type) { + kmem_free(gttseg->igs_phyaddr, sizeof (uint32_t) * + gttseg->igs_npage); + kmem_free(gttseg, sizeof (igd_gtt_seg_t)); + } + + return (0); +} + +static int +ioctl_agpgart_pages_rebind(agpgart_softstate_t *st) +{ + int rval; + ldi_handle_t hdl; + igd_gtt_seg_t *gttseg; + int err = 0; + + hdl = st->asoft_devreg.agprd_masterhdl; + struct list_head *entry, *temp; + list_head_for_each_safe(entry, temp, &st->mapped_list) { + gttseg = entry->gttseg; + list_head_del(entry); + kmem_free(entry, sizeof (*entry)); + if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)gttseg, FKIOCTL, + kcred, &rval)) { + AGPDB_PRINT2((CE_WARN, "agpgart_pages_rebind errori")); + err = -1; + break; + } + kmem_free(gttseg->igs_phyaddr, sizeof (uint32_t) * + gttseg->igs_npage); + kmem_free(gttseg, sizeof (igd_gtt_seg_t)); + + } + return (err); + +} + /*ARGSUSED*/ static int agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags, @@ -3065,6 +3307,18 @@ agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags, case AGPIOC_UNBIND: retval = ioctl_agpgart_unbind(softstate, arg, flags); break; + case AGPIOC_FLUSHCHIPSET: + retval = ioctl_agpgart_flush_chipset(softstate); + break; + case AGPIOC_PAGES_BIND: + retval = ioctl_agpgart_pages_bind(softstate, arg, flags); + break; + case AGPIOC_PAGES_UNBIND: + retval = ioctl_agpgart_pages_unbind(softstate, arg, flags); + break; + case AGPIOC_PAGES_REBIND: + retval = ioctl_agpgart_pages_rebind(softstate); + break; default: AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: wrong argument")); retval = ENXIO; diff --git a/usr/src/uts/intel/io/agpgart/agptarget.c b/usr/src/uts/intel/io/agpgart/agptarget.c index d9b710b842..f0c1808abb 100644 --- a/usr/src/uts/intel/io/agpgart/agptarget.c +++ b/usr/src/uts/intel/io/agpgart/agptarget.c @@ -20,6 +20,11 @@ */ /* + * Copyright (c) 2009, Intel Corporation. 
+ * All Rights Reserved. + */ + +/* * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ @@ -32,6 +37,7 @@ #include <sys/stat.h> #include <sys/ddi.h> #include <sys/sunddi.h> +#include <sys/sunndi.h> #include <sys/modctl.h> #include <sys/sunldi.h> #include <sys/pci.h> @@ -44,6 +50,37 @@ int agptarget_debug_var = 0; #define INST2NODENUM(inst) (inst) #define DEV2INST(dev) (getminor(dev)) +static ddi_device_acc_attr_t dev_attr = { + DDI_DEVICE_ATTR_V0, + DDI_NEVERSWAP_ACC, + DDI_STRICTORDER_ACC, +}; + +static struct _i9xx_private_compat { + uint64_t physical; /* physical address */ + uint_t size; /* size of mapping */ + uint_t regnum; /* register number */ + caddr_t flush_page; /* kernel virtual address */ + ddi_acc_handle_t handle; /* data access handle */ +} i9xx_private; + +#define I915_IFPADDR 0x60 +#define I965_IFPADDR 0x70 + +#define HIADDR(n) ((uint32_t)(((uint64_t)(n) & \ + 0xFFFFFFFF00000000ULL) >> 32)) +#define LOADDR(n) ((uint32_t)((uint64_t)(n) & 0x00000000FFFFFFFF)) + +/* + * Using for GEM to flush the chipset global + * write buffers on certain intel chipset + */ + +static void +intel_chipset_flush_setup(dev_info_t *dip, + ddi_acc_handle_t pci_acc_hdl, + int gms_off); + typedef struct agp_target_softstate { dev_info_t *tsoft_dip; ddi_acc_handle_t tsoft_pcihdl; @@ -376,6 +413,8 @@ static gms_mode_t gms_modes[] = { {INTEL_BR_G45, I8XX_CONF_GC, I8XX_GC_MODE_MASK, GMS_SIZE(gms_G4X), gms_G4X}, {INTEL_BR_G41, I8XX_CONF_GC, I8XX_GC_MODE_MASK, + GMS_SIZE(gms_G4X), gms_G4X}, + {INTEL_BR_B43, I8XX_CONF_GC, I8XX_GC_MODE_MASK, GMS_SIZE(gms_G4X), gms_G4X} }; static int @@ -489,6 +528,145 @@ intel_br_suspend(agp_target_softstate_t *softstate) return (DDI_SUCCESS); } +static void +intel_chipset_flush_setup(dev_info_t *dip, + ddi_acc_handle_t pci_acc_hdl, int gms_off) +{ + uint32_t temp_hi, temp_lo; + ndi_ra_request_t request; + uint64_t answer; + uint64_t alen; + pci_regspec_t *regs, *regs2; + int n_reg, length; + uint32_t 
i, regnum, ret; + ddi_acc_handle_t conf_hdl = pci_acc_hdl; + uint32_t phys_hi_mask = 0; + + bzero((caddr_t)&request, sizeof (ndi_ra_request_t)); + request.ra_flags |= NDI_RA_ALIGN_SIZE | NDI_RA_ALLOC_BOUNDED; + request.ra_boundbase = 0; + request.ra_boundlen = 0xffffffff; + request.ra_len = AGP_PAGE_SIZE; + + /* IS_I965 || IS_G33 || IS_G4X */ + if (gms_off > 11) { + temp_hi = pci_config_get32(conf_hdl, I965_IFPADDR + 4); + temp_lo = pci_config_get32(conf_hdl, I965_IFPADDR); + phys_hi_mask |= PCI_ADDR_MEM64 | I965_IFPADDR; + } else { + temp_lo = pci_config_get32(conf_hdl, I915_IFPADDR); + phys_hi_mask |= PCI_ADDR_MEM32 | I915_IFPADDR; + } + + if (!(temp_lo & 0x1)) { + /* allocate space from the allocator */ + if (ndi_ra_alloc(ddi_get_parent(dip), + &request, &answer, &alen, + NDI_RA_TYPE_MEM, NDI_RA_PASS) + != NDI_SUCCESS) { + return; + } + TARGETDB_PRINT2((CE_WARN, "addr = 0x%x.0x%x len [0x%x]\n", + HIADDR(answer), + LOADDR(answer), + (uint32_t)alen)); + + if (gms_off > 11) { + pci_config_put32(conf_hdl, I965_IFPADDR + 4, + HIADDR(answer)); + pci_config_put32(conf_hdl, I965_IFPADDR, + LOADDR(answer) | 0x1); + } else { + pci_config_put32(conf_hdl, I915_IFPADDR, + LOADDR(answer) | 0x1); + } + } + else + { + temp_lo &= ~0x1; + answer = ((uint64_t)temp_hi << 32) | temp_lo; + } + + temp_hi = pci_config_get32(conf_hdl, I965_IFPADDR + 4); + temp_lo = pci_config_get32(conf_hdl, I965_IFPADDR); + + /* set pci props */ + if (ddi_dev_nregs(dip, &n_reg) == DDI_FAILURE) { + TARGETDB_PRINT2((CE_WARN, "init_chipset_flush failed")); + n_reg = 0; + return; + } + + if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, + "reg", (caddr_t)®s, &length) != + DDI_PROP_SUCCESS) { + TARGETDB_PRINT2((CE_WARN, "init_chipset_flush failed!")); + return; + } + + regnum = length / sizeof (pci_regspec_t); + + TARGETDB_PRINT2((CE_WARN, "reg regnum %d", regnum)); + + regs2 = kmem_alloc((regnum + 1) * sizeof (pci_regspec_t), KM_SLEEP); + if (regs2 == NULL) { + + TARGETDB_PRINT2((CE_WARN, 
"init_chipset_flush failed")); + goto error; + } + if (memcpy(regs2, regs, (size_t)length) == NULL) { + TARGETDB_PRINT2((CE_WARN, "init_chipset_flush failed")); + kmem_free(regs2, (regnum + 1) * sizeof (pci_regspec_t)); + goto error; + } + + /* Bus=0, Dev=0, Func=0 0x82001000 */ + regs2[regnum].pci_phys_hi = PCI_REG_REL_M | phys_hi_mask; + regs2[regnum].pci_phys_mid = HIADDR(answer); + regs2[regnum].pci_phys_low = LOADDR(answer); + regs2[regnum].pci_size_hi = 0x00000000; + regs2[regnum].pci_size_low = AGP_PAGE_SIZE; + kmem_free(regs, (size_t)length); + regs = regs2; + + i = ndi_prop_update_int_array(DDI_DEV_T_NONE, + dip, "reg", (int *)regs, (uint_t)5 * (regnum + 1)); + if (i != DDI_PROP_SUCCESS) { + TARGETDB_PRINT2((CE_WARN, "Failed to update reg %d", i)); + kmem_free(regs2, (regnum + 1) * sizeof (pci_regspec_t)); + return; + } + kmem_free(regs2, (regnum + 1) * sizeof (pci_regspec_t)); + regs = NULL; + + if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, + "reg", (caddr_t)®s, &length) != + DDI_PROP_SUCCESS) { + TARGETDB_PRINT2((CE_WARN, "init_chipset_flush: failed1!")); + goto error; + } + regnum = length / sizeof (pci_regspec_t); + kmem_free(regs, (size_t)length); + + i9xx_private.physical = answer; + i9xx_private.size = AGP_PAGE_SIZE; + i9xx_private.regnum = regnum - 1; + ret = ddi_regs_map_setup(dip, i9xx_private.regnum, + (caddr_t *)&(i9xx_private.flush_page), 0, + i9xx_private.size, &dev_attr, + (ddi_acc_handle_t *)&i9xx_private.handle); + + if (ret != DDI_SUCCESS) { + TARGETDB_PRINT2((CE_WARN, "chipset_flush do_ioremap failed ")); + i9xx_private.handle = NULL; + return; + } + return; +error: + if (regs) + kmem_free(regs, (size_t)length); +} + static int agp_target_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) { @@ -795,6 +973,29 @@ agp_target_ioctl(dev_t dev, int cmd, intptr_t data, int mode, break; } + case INTEL_CHIPSET_FLUSH_SETUP: + { + intel_chipset_flush_setup(st->tsoft_dip, + st->tsoft_pcihdl, st->tsoft_gms_off); + break; + } + case 
INTEL_CHIPSET_FLUSH: + { + if (i9xx_private.handle != NULL) + ddi_put32(i9xx_private.handle, + (uint32_t *)(uintptr_t)i9xx_private.flush_page, 1); + + break; + } + case INTEL_CHIPSET_FLUSH_FREE: + { + if (i9xx_private.handle != NULL) { + ddi_regs_map_free( + (ddi_acc_handle_t *)&i9xx_private.handle); + i9xx_private.handle = NULL; + } + break; + } default: mutex_exit(&st->tsoft_lock); return (ENXIO); |