summaryrefslogtreecommitdiff
path: root/usr/src/uts/common
diff options
context:
space:
mode:
authormiao chen - Sun Microsystems - Beijing China <Miao.Chen@Sun.COM>2009-12-05 13:25:40 +0800
committermiao chen - Sun Microsystems - Beijing China <Miao.Chen@Sun.COM>2009-12-05 13:25:40 +0800
commit0035d21c77a24d02faf34c10aabc120ca692efb5 (patch)
tree10cfba243ff76ec208d28baf0a4bad8d2ac853c5 /usr/src/uts/common
parentc2e5330e09ea2d4fb7299851f5ebf26155c2117f (diff)
downloadillumos-gate-0035d21c77a24d02faf34c10aabc120ca692efb5.tar.gz
PSARC 2009/425 Additional ioctls for GEM support in i915 driver
PSARC 2009/474 Additional IOCTL Support in Agpgart Driver 6815826 GEM should be supported in drm driver 6904304 System panic in pci_get_available_prop() in busra.c
Diffstat (limited to 'usr/src/uts/common')
-rw-r--r--usr/src/uts/common/Makefile.files2
-rw-r--r--usr/src/uts/common/io/busra.c5
-rw-r--r--usr/src/uts/common/io/drm/drm.h46
-rw-r--r--usr/src/uts/common/io/drm/drmP.h278
-rw-r--r--usr/src/uts/common/io/drm/drm_agpsupport.c103
-rw-r--r--usr/src/uts/common/io/drm/drm_atomic.h3
-rw-r--r--usr/src/uts/common/io/drm/drm_bufs.c3
-rw-r--r--usr/src/uts/common/io/drm/drm_cache.c67
-rw-r--r--usr/src/uts/common/io/drm/drm_drv.c40
-rw-r--r--usr/src/uts/common/io/drm/drm_fops.c8
-rw-r--r--usr/src/uts/common/io/drm/drm_gem.c698
-rw-r--r--usr/src/uts/common/io/drm/drm_irq.c120
-rw-r--r--usr/src/uts/common/io/drm/drm_linux_list.h36
-rw-r--r--usr/src/uts/common/io/drm/drm_memory.c10
-rw-r--r--usr/src/uts/common/io/drm/drm_mm.c336
-rw-r--r--usr/src/uts/common/io/drm/drm_sunmod.c85
-rw-r--r--usr/src/uts/common/sys/agp/agpdefs.h37
-rw-r--r--usr/src/uts/common/sys/agp/agpgart_impl.h16
-rw-r--r--usr/src/uts/common/sys/agp/agpmaster_io.h9
-rw-r--r--usr/src/uts/common/sys/agp/agptarget_io.h7
-rw-r--r--usr/src/uts/common/sys/agpgart.h21
21 files changed, 1795 insertions, 135 deletions
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 1278f6356f..1feed57654 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -1056,7 +1056,7 @@ DRM_OBJS += drm_sunmod.o drm_kstat.o drm_agpsupport.o \
drm_auth.o drm_bufs.o drm_context.o drm_dma.o \
drm_drawable.o drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_msg.o drm_pci.o drm_scatter.o \
- ati_pcigart.o
+ drm_cache.o drm_gem.o drm_mm.o ati_pcigart.o
FM_OBJS += devfm.o devfm_machdep.o
diff --git a/usr/src/uts/common/io/busra.c b/usr/src/uts/common/io/busra.c
index 8a72954676..66fc7a8c25 100644
--- a/usr/src/uts/common/io/busra.c
+++ b/usr/src/uts/common/io/busra.c
@@ -444,7 +444,7 @@ ndi_ra_free(dev_info_t *dip, uint64_t base, uint64_t len, char *type,
* Update dip's "available" property, adding this piece of
* resource to the pool.
*/
- (void) pci_put_available_prop(dip, base, len, type);
+ (void) pci_put_available_prop(dipmap->ra_dip, base, len, type);
done:
return (NDI_SUCCESS);
@@ -732,7 +732,8 @@ ndi_ra_alloc(dev_info_t *dip, ndi_ra_request_t *req, uint64_t *retbasep,
* resource from the pool.
*/
if ((rval == NDI_SUCCESS) || (rval == NDI_RA_PARTIAL_REQ))
- (void) pci_get_available_prop(dip, *retbasep, *retlenp, type);
+ (void) pci_get_available_prop(dipmap->ra_dip,
+ *retbasep, *retlenp, type);
return (rval);
}
diff --git a/usr/src/uts/common/io/drm/drm.h b/usr/src/uts/common/io/drm/drm.h
index 13e8bcf33c..87af6eddfe 100644
--- a/usr/src/uts/common/io/drm/drm.h
+++ b/usr/src/uts/common/io/drm/drm.h
@@ -13,6 +13,7 @@
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -665,6 +666,19 @@ typedef union drm_wait_vblank {
struct drm_wait_vblank_reply reply;
} drm_wait_vblank_t;
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+typedef struct drm_modeset_ctl {
+ uint32_t crtc;
+ uint32_t cmd;
+} drm_modeset_ctl_t;
+
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
@@ -737,6 +751,34 @@ typedef struct drm_set_version {
int drm_dd_minor;
} drm_set_version_t;
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+typedef struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+} drm_gem_close_t;
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+typedef struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+} drm_gem_flink_t;
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+typedef struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+} drm_gem_open_t;
+
/**
* \name Ioctls Definitions
*/
@@ -756,6 +798,10 @@ typedef struct drm_set_version {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, drm_modeset_ctl_t)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, drm_gem_close_t)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, drm_gem_flink_t)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, drm_gem_open_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
diff --git a/usr/src/uts/common/io/drm/drmP.h b/usr/src/uts/common/io/drm/drmP.h
index 4c6934db87..05105148b5 100644
--- a/usr/src/uts/common/io/drm/drmP.h
+++ b/usr/src/uts/common/io/drm/drmP.h
@@ -5,6 +5,7 @@
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -151,8 +152,9 @@
#define DRM_LOCK_OWNED() ASSERT(mutex_owned(&dev->dev_lock))
#define spin_lock_irqsave(l, flag) mutex_enter(l)
#define spin_unlock_irqrestore(u, flag) mutex_exit(u)
-#define spin_lock(l) mutex_enter(l)
-#define spin_unlock(u) mutex_exit(u)
+#define spin_lock(l) mutex_enter(l)
+#define spin_unlock(u) mutex_exit(u)
+
#define DRM_UDELAY(sec) delay(drv_usectohz(sec *1000))
#define DRM_MEMORYBARRIER()
@@ -166,12 +168,16 @@ typedef struct drm_driver_info drm_driver_t;
drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
#define DRM_COPYFROM_WITH_RETURN(dest, src, size) \
- if (ddi_copyin(src, dest, size, 0)) \
- return (EFAULT)
+ if (ddi_copyin((src), (dest), (size), 0)) { \
+ DRM_ERROR("%s: copy from user failed", __func__); \
+ return (EFAULT); \
+ }
#define DRM_COPYTO_WITH_RETURN(dest, src, size) \
- if (ddi_copyout((src), (dest), (size), 0)) \
- return (EFAULT)
+ if (ddi_copyout((src), (dest), (size), 0)) { \
+ DRM_ERROR("%s: copy to user failed", __func__); \
+ return (EFAULT); \
+ }
#define DRM_COPY_FROM_USER(dest, src, size) \
ddi_copyin((src), (dest), (size), 0) /* flag for src */
@@ -222,6 +228,8 @@ typedef struct drm_wait_queue {
mutex_exit(&(q)->lock); \
}
+#define jiffies ddi_get_lbolt()
+
#define DRM_WAIT_ON(ret, q, timeout, condition) \
mutex_enter(&(q)->lock); \
while (!(condition)) { \
@@ -239,6 +247,21 @@ typedef struct drm_wait_queue {
} \
mutex_exit(&(q)->lock);
+#define DRM_WAIT(ret, q, condition) \
+mutex_enter(&(q)->lock); \
+if (!(condition)) { \
+ ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 30 * DRM_HZ); \
+ if (ret == -1) { \
+ /* gfx maybe hang */ \
+ if (!(condition)) \
+ ret = -2; \
+ } else { \
+ ret = 0; \
+ } \
+} \
+mutex_exit(&(q)->lock);
+
+
#define DRM_GETSAREA() \
{ \
drm_local_map_t *map; \
@@ -255,8 +278,8 @@ typedef struct drm_wait_queue {
#define LOCK_TEST_WITH_RETURN(dev, fpriv) \
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
dev->lock.filp != fpriv) { \
- DRM_ERROR("called without lock held"); \
- return (EINVAL); \
+ DRM_DEBUG("%s called without lock held", __func__); \
+ return (EINVAL); \
}
#define DRM_IRQ_ARGS caddr_t arg
@@ -281,6 +304,11 @@ enum {
#define PAGE_ALIGN(addr) (((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
#define DRM_SUSER(p) (crgetsgid(p) == 0 || crgetsuid(p) == 0)
+#define DRM_GEM_OBJIDR_HASHNODE 1024
+#define idr_list_for_each(entry, head) \
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
+ list_for_each(entry, &(head)->next[key])
+
/*
* wait for 400 milliseconds
*/
@@ -378,6 +406,88 @@ typedef struct drm_buf_entry {
} drm_buf_entry_t;
typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+
+/* BEGIN CSTYLED */
+typedef struct drm_local_map {
+ unsigned long offset; /* Physical address (0 for SAREA) */
+ unsigned long size; /* Physical size (bytes) */
+ drm_map_type_t type; /* Type of memory mapped */
+ drm_map_flags_t flags; /* Flags */
+ void *handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* Boolean: MTRR used */
+ /* Private data */
+ int rid; /* PCI resource ID for bus_space */
+ int kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
+ caddr_t dev_addr; /* base device address */
+ ddi_acc_handle_t dev_handle; /* The data access handle */
+ ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free */
+ TAILQ_ENTRY(drm_local_map) link;
+} drm_local_map_t;
+/* END CSTYLED */
+
+/*
+ * This structure defines the drm_mm memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+ /* Reference count of this object */
+ atomic_t refcount;
+
+ /* Handle count of this object. Each handle also holds a reference */
+ atomic_t handlecount;
+
+ /* Related drm device */
+ struct drm_device *dev;
+
+ int flink;
+ /*
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /*
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /*
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /*
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+
+ drm_local_map_t *map;
+ ddi_dma_handle_t dma_hdl;
+ ddi_acc_handle_t acc_hdl;
+ caddr_t kaddr;
+ size_t real_size; /* real size of memory */
+ pfn_t *pfnarray;
+};
+
+struct idr_list {
+ struct idr_list *next, *prev;
+ struct drm_gem_object *obj;
+ uint32_t handle;
+ caddr_t contain_ptr;
+};
+
struct drm_file {
TAILQ_ENTRY(drm_file) link;
int authenticated;
@@ -389,6 +499,13 @@ struct drm_file {
drm_magic_t magic;
unsigned long ioctl_count;
void *driver_priv;
+ /* Mapping of mm object handles to object pointers. */
+ struct idr_list object_idr;
+ /* Lock for synchronization of access to object_idr. */
+ kmutex_t table_lock;
+
+ dev_t dev;
+ cred_t *credp;
};
typedef struct drm_lock_data {
@@ -467,26 +584,26 @@ typedef struct drm_sg_mem {
drm_dma_handle_t *dmah_gart; /* Handle to PCI memory */
} drm_sg_mem_t;
-typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
+/*
+ * Generic memory manager structs
+ */
-/* BEGIN CSTYLED */
-typedef struct drm_local_map {
- unsigned long offset; /* Physical address (0 for SAREA) */
- unsigned long size; /* Physical size (bytes) */
- drm_map_type_t type; /* Type of memory mapped */
- drm_map_flags_t flags; /* Flags */
- void *handle; /* User-space: "Handle" to pass to mmap */
- /* Kernel-space: kernel-virtual address */
- int mtrr; /* Boolean: MTRR used */
- /* Private data */
- int rid; /* PCI resource ID for bus_space */
- int kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
- caddr_t dev_addr; /* base device address */
- ddi_acc_handle_t dev_handle; /* The data access handle */
- ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free */
- TAILQ_ENTRY(drm_local_map) link;
-} drm_local_map_t;
-/* END CSTYLED */
+struct drm_mm_node {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+ int free;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+};
+
+struct drm_mm {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+};
+
+typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
typedef struct drm_vbl_sig {
@@ -556,6 +673,16 @@ struct drm_driver_info {
int (*enable_vblank)(struct drm_device *dev, int crtc);
void (*disable_vblank)(struct drm_device *dev, int crtc);
+ /*
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+
drm_ioctl_desc_t *driver_ioctls;
int max_driver_ioctl;
@@ -577,6 +704,7 @@ struct drm_driver_info {
unsigned use_vbl_irq :1;
unsigned use_vbl_irq2 :1;
unsigned use_mtrr :1;
+ unsigned use_gem;
};
/*
@@ -607,11 +735,12 @@ struct drm_device {
int flags; /* Flags to open(2) */
/* Locks */
- kmutex_t vbl_lock; /* protects vblank operations */
- kmutex_t dma_lock; /* protects dev->dma */
- kmutex_t irq_lock; /* protects irq condition checks */
- kmutex_t dev_lock; /* protects everything else */
+ kmutex_t vbl_lock; /* protects vblank operations */
+ kmutex_t dma_lock; /* protects dev->dma */
+ kmutex_t irq_lock; /* protects irq condition checks */
+ kmutex_t dev_lock; /* protects everything else */
drm_lock_data_t lock; /* Information on hardware lock */
+ kmutex_t struct_mutex; /* < For others */
/* Usage Counters */
int open_count; /* Outstanding files open */
@@ -651,7 +780,12 @@ struct drm_device {
drm_vbl_sig_list_t vbl_sig_list;
drm_vbl_sig_list_t vbl_sig_list2;
-
+ /*
+ * At load time, disabling the vblank interrupt won't be allowed since
+ * old clients may not call the modeset ioctl and therefore misbehave.
+ * Once the modeset ioctl *has* been called though, we can safely
+ * disable them when unused.
+ */
int vblank_disable_allowed;
wait_queue_head_t vbl_queue; /* vbl wait channel */
@@ -672,13 +806,13 @@ struct drm_device {
u32 *last_vblank;
/* so we don't call enable more than */
atomic_t *vblank_enabled;
- /* for compensation of spurious wraparounds */
- u32 *vblank_premodeset;
+ /* Display driver is setting mode */
+ int *vblank_inmodeset;
/* Don't wait while crtc is likely disabled */
- int *vblank_suspend;
+ int *vblank_suspend;
/* size of vblank counter register */
- u32 max_vblank_count;
- int num_crtcs;
+ u32 max_vblank_count;
+ int num_crtcs;
kmutex_t tasklet_lock;
void (*locked_tasklet_func)(struct drm_device *dev);
@@ -698,6 +832,22 @@ struct drm_device {
u32 *drw_bitfield;
unsigned int drw_info_length;
drm_drawable_info_t **drw_info;
+
+ /* \name GEM information */
+ /* @{ */
+ kmutex_t object_name_lock;
+ struct idr_list object_name_idr;
+ atomic_t object_count;
+ atomic_t object_memory;
+ atomic_t pin_count;
+ atomic_t pin_memory;
+ atomic_t gtt_count;
+ atomic_t gtt_memory;
+ uint32_t gtt_total;
+ uint32_t invalidate_domains; /* domains pending invalidation */
+ uint32_t flush_domains; /* domains pending flush */
+ /* @} */
+
/*
* Saving S3 context
*/
@@ -767,8 +917,8 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc);
int drm_vblank_get(struct drm_device *dev, int crtc);
void drm_vblank_put(struct drm_device *dev, int crtc);
int drm_vblank_init(struct drm_device *dev, int num_crtcs);
-void drm_locked_tasklet(drm_device_t *, void(*func)(drm_device_t *));
void drm_vblank_cleanup(struct drm_device *dev);
+int drm_modeset_ctl(DRM_IOCTL_ARGS);
/* AGP/GART support (drm_agpsupport.c) */
int drm_device_is_agp(drm_device_t *);
@@ -776,10 +926,21 @@ int drm_device_is_pcie(drm_device_t *);
drm_agp_head_t *drm_agp_init(drm_device_t *);
void drm_agp_fini(drm_device_t *);
int drm_agp_do_release(drm_device_t *);
-void *drm_agp_allocate_memory(size_t, uint32_t);
-int drm_agp_free_memory(void *);
+void *drm_agp_allocate_memory(size_t pages,
+ uint32_t type, drm_device_t *dev);
+int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
int drm_agp_unbind_memory(unsigned long, drm_device_t *);
+int drm_agp_bind_pages(drm_device_t *dev,
+ pfn_t *pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset);
+int drm_agp_unbind_pages(drm_device_t *dev,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ uint32_t type);
+void drm_agp_chipset_flush(struct drm_device *dev);
+void drm_agp_rebind(struct drm_device *dev);
/* kstat support (drm_kstats.c) */
int drm_init_kstats(drm_device_t *);
@@ -797,6 +958,8 @@ int drm_lock(DRM_IOCTL_ARGS);
int drm_unlock(DRM_IOCTL_ARGS);
int drm_version(DRM_IOCTL_ARGS);
int drm_setversion(DRM_IOCTL_ARGS);
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_irq_by_busid(DRM_IOCTL_ARGS);
@@ -858,8 +1021,15 @@ int drm_agp_bind(DRM_IOCTL_ARGS);
int drm_sg_alloc(DRM_IOCTL_ARGS);
int drm_sg_free(DRM_IOCTL_ARGS);
-extern int drm_debug_flag;
+/* drm_mm.c */
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size, unsigned alignment);
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment, int best_match);
+extern void drm_mm_clean_ml(const struct drm_mm *mm);
+extern int drm_debug_flag;
/* We add function to support DRM_DEBUG,DRM_ERROR,DRM_INFO */
extern void drm_debug(const char *fmt, ...);
@@ -905,4 +1075,30 @@ extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
cred_t *);
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_object_free(struct drm_gem_object *obj);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+ size_t size);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
+
+void drm_gem_object_reference(struct drm_gem_object *obj);
+void drm_gem_object_unreference(struct drm_gem_object *obj);
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep);
+void drm_gem_object_handle_reference(struct drm_gem_object *obj);
+
+void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
+ int handle);
+int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
+int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
+int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
+void drm_gem_open(struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
+
#endif /* _DRMP_H */
diff --git a/usr/src/uts/common/io/drm/drm_agpsupport.c b/usr/src/uts/common/io/drm/drm_agpsupport.c
index 48f7f5e454..ae695dabaf 100644
--- a/usr/src/uts/common/io/drm/drm_agpsupport.c
+++ b/usr/src/uts/common/io/drm/drm_agpsupport.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -10,6 +10,7 @@
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -37,8 +38,6 @@
*
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drm.h"
#include "drmP.h"
@@ -193,6 +192,7 @@ drm_agp_enable(DRM_IOCTL_ARGS)
dev->agp->mode = modes.mode;
setup.agps_mode = (uint32_t)modes.mode;
+
DRM_DEBUG("drm_agp_enable: dev->agp->mode=%lx", modes.mode);
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_SETUP,
@@ -205,6 +205,7 @@ drm_agp_enable(DRM_IOCTL_ARGS)
dev->agp->base = dev->agp->agp_info.agpi_aperbase;
dev->agp->enabled = 1;
+ DRM_DEBUG("drm_agp_enable: dev->agp->base=0x%lx", dev->agp->base);
return (0);
}
@@ -247,6 +248,8 @@ drm_agp_alloc(DRM_IOCTL_ARGS)
dev->agp->memory->prev = entry;
dev->agp->memory = entry;
+ DRM_DEBUG("entry->phys_addr %lx", entry->phys_addr);
+
/* physical is used only by i810 driver */
request.physical = alloc.agpa_physical;
request.handle = (unsigned long)entry->handle;
@@ -383,7 +386,6 @@ drm_agp_init(drm_device_t *dev)
drm_agp_head_t *agp = NULL;
int retval, rval;
- DRM_DEBUG("drm_agp_init\n");
agp = kmem_zalloc(sizeof (drm_agp_head_t), KM_SLEEP);
retval = ldi_ident_from_dip(dev->dip, &agp->agpgart_li);
@@ -437,14 +439,14 @@ drm_agp_fini(drm_device_t *dev)
/*ARGSUSED*/
void *
-drm_agp_allocate_memory(size_t pages, uint32_t type)
+drm_agp_allocate_memory(size_t pages, uint32_t type, drm_device_t *dev)
{
return (NULL);
}
/*ARGSUSED*/
int
-drm_agp_free_memory(void *handle)
+drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev)
{
return (1);
}
@@ -494,3 +496,92 @@ drm_agp_unbind_memory(unsigned long handle, drm_device_t *dev)
entry->bound = 0;
return (0);
}
+
+/*
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+int
+drm_agp_bind_pages(drm_device_t *dev,
+ pfn_t *pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset)
+{
+
+ agp_bind_pages_t bind;
+ int ret, rval;
+
+ bind.agpb_pgstart = gtt_offset / AGP_PAGE_SIZE;
+ bind.agpb_pgcount = num_pages;
+ bind.agpb_pages = pages;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_BIND,
+ (intptr_t)&bind, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("AGPIOC_PAGES_BIND failed ret %d", ret);
+ return (ret);
+ }
+ return (0);
+}
+
+int
+drm_agp_unbind_pages(drm_device_t *dev,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ uint32_t type)
+{
+
+ agp_unbind_pages_t unbind;
+ int ret, rval;
+
+ unbind.agpb_pgstart = gtt_offset / AGP_PAGE_SIZE;
+ unbind.agpb_pgcount = num_pages;
+ unbind.agpb_type = type;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_UNBIND,
+ (intptr_t)&unbind, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_DEBUG("drm_agp_unbind_pages AGPIOC_PAGES_UNBIND failed");
+ return (ret);
+ }
+ return (0);
+}
+
+/*
+ * Certain Intel chipsets contains a global write buffer, and this can require
+ * flushing from the drm or X.org to make sure all data has hit RAM before
+ * initiating a GPU transfer, due to a lack of coherency with the integrated
+ * graphics device and this buffer.
+ */
+void
+drm_agp_chipset_flush(struct drm_device *dev)
+{
+ int ret, rval;
+
+ DRM_DEBUG("agp_chipset_flush");
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_FLUSHCHIPSET,
+ (intptr_t)0, FKIOCTL, kcred, &rval);
+ if (ret != 0) {
+ DRM_ERROR("Failed to drm_agp_chipset_flush ret %d", ret);
+ }
+}
+
+/*
+ * The pages are evict on suspend, so re-bind it at resume time
+ */
+void
+drm_agp_rebind(struct drm_device *dev)
+{
+ int ret, rval;
+
+ if (!dev->agp) {
+ return;
+ }
+
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_REBIND,
+ (intptr_t)0, FKIOCTL, kcred, &rval);
+ if (ret != 0) {
+ DRM_ERROR("rebind failed %d", ret);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_atomic.h b/usr/src/uts/common/io/drm/drm_atomic.h
index 002b974933..b8a4f56091 100644
--- a/usr/src/uts/common/io/drm/drm_atomic.h
+++ b/usr/src/uts/common/io/drm/drm_atomic.h
@@ -11,6 +11,7 @@
/*
* Copyright 2004 Eric Anholt
+ * Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -57,7 +58,7 @@ typedef uint32_t atomic_t;
#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_dec_uint(p)
#define atomic_add(n, p) atomic_add_int(p, n)
-#define atomic_sub(n, p) atomic_dec_uint(p, n)
+#define atomic_sub(n, p) atomic_add_int(p, -n)
#define atomic_set_int(p, bits) atomic_or_uint(p, bits)
#define atomic_clear_int(p, bits) atomic_and_uint(p, ~(bits))
#define atomic_cmpset_int(p, c, n) \
diff --git a/usr/src/uts/common/io/drm/drm_bufs.c b/usr/src/uts/common/io/drm/drm_bufs.c
index ad1254072a..ec01d37dab 100644
--- a/usr/src/uts/common/io/drm/drm_bufs.c
+++ b/usr/src/uts/common/io/drm/drm_bufs.c
@@ -5,6 +5,7 @@
/*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -170,7 +171,7 @@ int drm_addmap(drm_device_t *dev, unsigned long offset,
break;
case _DRM_CONSISTENT:
- cmn_err(CE_WARN, "%d DRM_AGP_CONSISTENT", __LINE__);
+ DRM_ERROR("%d DRM_AGP_CONSISTENT", __LINE__);
return (ENOTSUP);
case _DRM_AGP:
map->offset += dev->agp->base;
diff --git a/usr/src/uts/common/io/drm/drm_cache.c b/usr/src/uts/common/io/drm/drm_cache.c
new file mode 100644
index 0000000000..44a19c0703
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_cache.c
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright(c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files(the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice(including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/x86_archext.h>
+#include <vm/seg_kmem.h>
+#include "drmP.h"
+
+extern void clflush_insn(caddr_t addr);
+extern void mfence_insn(void);
+
+static void
+drm_clflush_page(caddr_t page)
+{
+ unsigned int i;
+
+ if (page == NULL)
+ return;
+
+ for (i = 0; i < PAGE_SIZE; i += x86_clflush_size)
+ clflush_insn(page + i);
+ mfence_insn();
+}
+
+void
+drm_clflush_pages(caddr_t *pages, unsigned long num_pages)
+{
+
+ if (x86_feature & X86_CLFSH) {
+ unsigned long i;
+
+ for (i = 0; i < num_pages; i++)
+ drm_clflush_page(pages[i]);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_drv.c b/usr/src/uts/common/io/drm/drm_drv.c
index b104843031..ae86db50d7 100644
--- a/usr/src/uts/common/io/drm/drm_drv.c
+++ b/usr/src/uts/common/io/drm/drm_drv.c
@@ -5,6 +5,7 @@
/*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -61,6 +62,14 @@ drm_ioctl_desc_t drm_ioctls[DRIVER_IOCTL_COUNT] = {
{drm_getstats, 0},
[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] =
{drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_MODESET_CTL)] =
+ {drm_modeset_ctl, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GEM_CLOSE)] =
+ {drm_gem_close_ioctl, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GEM_FLINK)] =
+ {drm_gem_flink_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_GEM_OPEN)] =
+ {drm_gem_open_ioctl, DRM_AUTH},
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] =
{drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] =
@@ -141,6 +150,8 @@ drm_ioctl_desc_t drm_ioctls[DRIVER_IOCTL_COUNT] = {
{drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};
+extern void idr_list_free(struct idr_list *head);
+
const char *
drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist)
{
@@ -263,7 +274,7 @@ drm_lastclose(drm_device_t *dev)
if (entry->bound)
(void) drm_agp_unbind_memory(
(unsigned long)entry->handle, dev);
- (void) drm_agp_free_memory(entry->handle);
+ (void) drm_agp_free_memory(entry->handle, dev);
drm_free(entry, sizeof (*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
@@ -337,6 +348,15 @@ drm_load(drm_device_t *dev)
goto error;
}
+ if (dev->driver->use_gem == 1) {
+ retcode = drm_gem_init(dev);
+ if (retcode) {
+ DRM_ERROR("Cannot initialize graphics execution "
+ "manager (GEM)\n");
+ goto error;
+ }
+ }
+
if (drm_init_kstats(dev)) {
DRM_ERROR("drm_attach => drm_load: init kstats error");
retcode = EFAULT;
@@ -375,6 +395,11 @@ drm_unload(drm_device_t *dev)
drm_ctxbitmap_cleanup(dev);
+ if (dev->driver->use_gem == 1) {
+ idr_list_free(&dev->object_name_idr);
+ mutex_destroy(&dev->object_name_lock);
+ }
+
DRM_LOCK();
(void) drm_lastclose(dev);
DRM_UNLOCK();
@@ -393,6 +418,10 @@ drm_unload(drm_device_t *dev)
mutex_destroy(&dev->dev_lock);
mutex_destroy(&dev->drw_lock);
mutex_destroy(&dev->tasklet_lock);
+
+ dev->gtt_total = 0;
+ atomic_set(&dev->pin_memory, 0);
+ DRM_ERROR("drm_unload");
}
@@ -464,12 +493,17 @@ drm_close(drm_device_t *dev, int minor, int flag, int otyp,
"retake lock not implemented yet");
}
- if (dev->driver->use_dma)
+ if (dev->driver->use_dma) {
drm_reclaim_buffers(dev, fpriv);
+ }
+ if (dev->driver->use_gem == 1) {
+ drm_gem_release(dev, fpriv);
+ }
- if (dev->driver->postclose != NULL)
+ if (dev->driver->postclose != NULL) {
dev->driver->postclose(dev, fpriv);
+ }
TAILQ_REMOVE(&dev->files, fpriv, link);
drm_free(fpriv, sizeof (*fpriv), DRM_MEM_FILES);
diff --git a/usr/src/uts/common/io/drm/drm_fops.c b/usr/src/uts/common/io/drm/drm_fops.c
index 7f2ba588bd..da61e4dd07 100644
--- a/usr/src/uts/common/io/drm/drm_fops.c
+++ b/usr/src/uts/common/io/drm/drm_fops.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -11,6 +11,7 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -41,8 +42,6 @@
/* END CSTYLED */
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
/*ARGSUSED*/
@@ -110,6 +109,9 @@ drm_open_helper(drm_device_t *dev, drm_cminor_t *mp, int flags,
/* for compatibility root is always authenticated */
priv->authenticated = DRM_SUSER(credp);
+ if (dev->driver->use_gem == 1)
+ drm_gem_open(priv);
+
if (dev->driver->open) {
retcode = dev->driver->open(dev, priv);
if (retcode != 0) {
diff --git a/usr/src/uts/common/io/drm/drm_gem.c b/usr/src/uts/common/io/drm/drm_gem.c
new file mode 100644
index 0000000000..9805ae7b62
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_gem.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <vm/anon.h>
+#include <vm/seg_kmem.h>
+#include <vm/seg_kp.h>
+#include <vm/seg_map.h>
+#include <sys/fcntl.h>
+#include <sys/vnode.h>
+#include <sys/file.h>
+#include <sys/bitmap.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <gfx_private.h>
+#include "drmP.h"
+#include "drm.h"
+
+/*
+ * @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file. However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ * default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ * handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs(called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls. The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+void
+idr_list_init(struct idr_list *head)
+{
+ struct idr_list *entry;
+ /* HASH for accelerate */
+ entry = kmem_zalloc(DRM_GEM_OBJIDR_HASHNODE
+ * sizeof (struct idr_list), KM_NOSLEEP);
+ head->next = entry;
+ for (int i = 0; i < DRM_GEM_OBJIDR_HASHNODE; i++) {
+ INIT_LIST_HEAD(&entry[i]);
+ }
+}
+
+int
+idr_list_get_new_above(struct idr_list *head,
+ struct drm_gem_object *obj,
+ int *handlep)
+{
+ struct idr_list *entry;
+ int key;
+ entry = kmem_zalloc(sizeof (*entry), KM_NOSLEEP);
+ key = obj->name % DRM_GEM_OBJIDR_HASHNODE;
+ list_add(entry, &head->next[key], NULL);
+ entry->obj = obj;
+ entry->handle = obj->name;
+ *handlep = obj->name;
+ return (0);
+}
+
+struct drm_gem_object *
+idr_list_find(struct idr_list *head,
+ uint32_t name)
+{
+ struct idr_list *entry;
+ int key;
+ key = name % DRM_GEM_OBJIDR_HASHNODE;
+
+ list_for_each(entry, &head->next[key]) {
+ if (entry->handle == name)
+ return (entry->obj);
+ }
+ return (NULL);
+}
+
+int
+idr_list_remove(struct idr_list *head,
+ uint32_t name)
+{
+ struct idr_list *entry, *temp;
+ int key;
+ key = name % DRM_GEM_OBJIDR_HASHNODE;
+ list_for_each_safe(entry, temp, &head->next[key]) {
+ if (entry->handle == name) {
+ list_del(entry);
+ kmem_free(entry, sizeof (*entry));
+ return (0);
+ }
+ }
+ DRM_ERROR("Failed to remove the object %d", name);
+ return (-1);
+}
+
+void
+idr_list_free(struct idr_list *head)
+{
+ struct idr_list *entry, *temp;
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
+ list_for_each_safe(entry, temp, &head->next[key]) {
+ list_del(entry);
+ kmem_free(entry, sizeof (*entry));
+ }
+ }
+ kmem_free(head->next,
+ DRM_GEM_OBJIDR_HASHNODE * sizeof (struct idr_list));
+ head->next = NULL;
+}
+
+int
+idr_list_empty(struct idr_list *head)
+{
+ int empty;
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
+ empty = list_empty(&(head)->next[key]);
+ if (!empty)
+ return (empty);
+ }
+ return (1);
+}
+
+static uint32_t shfile_name = 0;
+#define SHFILE_NAME_MAX 0xffffffff
+
+/*
+ * will be set to 1 for 32 bit x86 systems only, in startup.c
+ */
+extern int segkp_fromheap;
+extern ulong_t *segkp_bitmap;
+
/*
 * Take an additional reference on a GEM object.
 */
void
drm_gem_object_reference(struct drm_gem_object *obj)
{
	atomic_inc(&obj->refcount);
}
+
/*
 * Drop a reference on a GEM object; frees the object when the last
 * reference goes away.  Safe to call with NULL.
 *
 * NOTE(review): the decrement and the zero test below are not a
 * single atomic operation, so two racing final unreferences could
 * both observe zero.  Call sites in this file serialize on
 * dev->struct_mutex -- confirm that holds for every caller.
 */
void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;

	atomic_sub(1, &obj->refcount);
	if (obj->refcount == 0)
		drm_gem_object_free(obj);
}
+
/*
 * Take a handle reference: bumps both the underlying object
 * reference count and the handle count.
 */
void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
	drm_gem_object_reference(obj);
	atomic_inc(&obj->handlecount);
}
+
/*
 * Drop a handle reference.  Safe to call with NULL.
 *
 * When the last handle goes away, the global (flink) name is torn
 * down via drm_gem_object_handle_free() BEFORE the backing object
 * reference is dropped, so the name table never points at freed
 * memory.
 */
void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;

	/*
	 * Drop the handle count before the object reference so the
	 * name cleanup in drm_gem_object_handle_free() runs while
	 * the object is still alive.
	 */
	atomic_sub(1, &obj->handlecount);
	if (obj->handlecount == 0)
		drm_gem_object_handle_free(obj);
	drm_gem_object_unreference(obj);
}
+
+/*
+ * Initialize the GEM device fields
+ */
+
int
drm_gem_init(struct drm_device *dev)
{
	/* Lock and table for global (flink) object names. */
	mutex_init(&dev->object_name_lock, NULL, MUTEX_DRIVER, NULL);
	idr_list_init(&dev->object_name_idr);

	/* Zero the device-wide GEM accounting counters. */
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);
	return (0);
}
+
+/*
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	/* DMA constraints for the backing pages; sgllen patched below. */
	static ddi_dma_attr_t dma_attr = {
		DMA_ATTR_V0,
		0U,			/* dma_attr_addr_lo */
		0xffffffffU,		/* dma_attr_addr_hi */
		0xffffffffU,		/* dma_attr_count_max */
		4096,			/* dma_attr_align */
		0x1fffU,		/* dma_attr_burstsizes */
		1,			/* dma_attr_minxfer */
		0xffffffffU,		/* dma_attr_maxxfer */
		0xffffffffU,		/* dma_attr_seg */
		1,			/* dma_attr_sgllen, variable */
		4,			/* dma_attr_granular */
		0			/* dma_attr_flags */
	};
	static ddi_device_acc_attr_t acc_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_MERGING_OK_ACC
	};
	struct drm_gem_object *obj;
	ddi_dma_cookie_t cookie;
	uint_t cookie_cnt;
	drm_local_map_t *map;

	pgcnt_t real_pgcnt, pgcnt = btopr(size);
	uint32_t paddr, cookie_end;
	int i, n;

	obj = kmem_zalloc(sizeof (struct drm_gem_object), KM_NOSLEEP);
	if (obj == NULL)
		return (NULL);

	obj->dev = dev;
	obj->flink = 0;
	obj->size = size;

	/* Names are drawn from one monotonic 32-bit counter. */
	if (shfile_name == SHFILE_NAME_MAX) {
		DRM_ERROR("No name space for object");
		goto err1;
	} else {
		obj->name = ++shfile_name;
	}

	/* Allow one scatter-gather cookie per page of the request. */
	dma_attr.dma_attr_sgllen = (int)pgcnt;

	if (ddi_dma_alloc_handle(dev->dip, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &obj->dma_hdl)) {
		DRM_ERROR("drm_gem_object_alloc: "
		    "ddi_dma_alloc_handle failed");
		goto err1;
	}
	/* Write-combined kernel mapping for the backing memory. */
	if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), &acc_attr,
	    IOMEM_DATA_UC_WR_COMBINE, DDI_DMA_DONTWAIT, NULL,
	    &obj->kaddr, &obj->real_size, &obj->acc_hdl)) {
		DRM_ERROR("drm_gem_object_alloc: "
		    "ddi_dma_mem_alloc failed");
		goto err2;
	}
	if (ddi_dma_addr_bind_handle(obj->dma_hdl, NULL,
	    obj->kaddr, obj->real_size, DDI_DMA_RDWR,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_cnt)
	    != DDI_DMA_MAPPED) {
		DRM_ERROR("drm_gem_object_alloc: "
		    "ddi_dma_addr_bind_handle failed");
		goto err3;
	}

	real_pgcnt = btopr(obj->real_size);

	/* Flatten the DMA cookie list into a per-page pfn array. */
	obj->pfnarray = kmem_zalloc(real_pgcnt * sizeof (pfn_t), KM_NOSLEEP);
	if (obj->pfnarray == NULL) {
		goto err4;
	}
	for (n = 0, i = 1; ; i++) {
		for (paddr = cookie.dmac_address,
		    cookie_end = cookie.dmac_address + cookie.dmac_size;
		    paddr < cookie_end;
		    paddr += PAGESIZE) {
			obj->pfnarray[n++] = btop(paddr);
			if (n >= real_pgcnt)
				goto addmap;
		}
		if (i >= cookie_cnt)
			break;
		ddi_dma_nextcookie(obj->dma_hdl, &cookie);
	}

addmap:
	/* Build the local map later consumed by devmap of this object. */
	map = drm_alloc(sizeof (struct drm_local_map), DRM_MEM_MAPS);
	if (map == NULL) {
		goto err5;
	}

	map->handle = obj;
	/* Low 32 bits of the object pointer serve as the map offset. */
	map->offset = (uintptr_t)map->handle;
	map->offset &= 0xffffffffUL;
	map->dev_addr = map->handle;
	map->size = obj->real_size;
	map->type = _DRM_TTM;
	map->flags = _DRM_WRITE_COMBINING | _DRM_REMOVABLE;
	map->drm_umem_cookie =
	    gfxp_umem_cookie_init(obj->kaddr, obj->real_size);
	if (map->drm_umem_cookie == NULL) {
		goto err6;
	}

	obj->map = map;

	/* Fresh object starts with one reference and one handle. */
	atomic_set(&obj->refcount, 1);
	atomic_set(&obj->handlecount, 1);
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto err7;
	}
	/* Charge the object against the device-wide GEM totals. */
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);

	return (obj);

	/* Unwind in reverse order of the setup steps above. */
err7:
	gfxp_umem_cookie_destroy(map->drm_umem_cookie);
err6:
	drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
err5:
	kmem_free(obj->pfnarray, real_pgcnt * sizeof (pfn_t));
err4:
	(void) ddi_dma_unbind_handle(obj->dma_hdl);
err3:
	ddi_dma_mem_free(&obj->acc_hdl);
err2:
	ddi_dma_free_handle(&obj->dma_hdl);
err1:
	kmem_free(obj, sizeof (struct drm_gem_object));

	return (NULL);
}
+
+/*
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ int err;
+ /*
+ * This is gross. The idr system doesn't let us try a delete and
+ * return an error code. It just spews if you fail at deleting.
+ * So, we have to grab a lock around finding the object and then
+ * doing the delete on it and dropping the refcount, or the user
+ * could race us to double-decrement the refcount and cause a
+ * use-after-free later. Given the frequency of our handle lookups,
+ * we may want to use ida for number allocation and a hash table
+ * for the pointers, anyway.
+ */
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_list_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ DRM_ERROR("obj %d is not in tne list, failed to close", handle);
+ return (EINVAL);
+ }
+ dev = obj->dev;
+
+ /* Release reference and decrement refcount. */
+ err = idr_list_remove(&filp->object_idr, handle);
+ if (err == -1)
+ DRM_ERROR("%s", __func__);
+
+ spin_unlock(&filp->table_lock);
+
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return (0);
+}
+
+/*
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
int
drm_gem_handle_create(struct drm_file *file_priv,
    struct drm_gem_object *obj,
    int *handlep)
{
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_list_get_new_above(&file_priv->object_idr, obj, handlep);
	spin_unlock(&file_priv->table_lock);
	/*
	 * -EAGAIN would mean the idr needed to grow and retry; the
	 * idr_list implementation in this file never returns it, so
	 * this loop is effectively a single pass.
	 */
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0) {
		DRM_ERROR("Failed to create handle");
		return (ret);
	}

	/* The new handle owns a handle count plus an object reference. */
	drm_gem_object_handle_reference(obj);
	return (0);
}
+
+/* Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *filp,
+ int handle)
+{
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_list_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ DRM_ERROR("object_lookup failed, handle %d", handle);
+ return (NULL);
+ }
+
+ drm_gem_object_reference(obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return (obj);
+}
+
+/*
+ * Releases the handle to an mm object.
+ */
+/*ARGSUSED*/
+int
+drm_gem_close_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_gem_close args;
+ int ret;
+
+ if (!(dev->driver->use_gem == 1))
+ return (ENODEV);
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (void *)data, sizeof (args));
+
+ ret = drm_gem_handle_delete(fpriv, args.handle);
+
+ return (ret);
+}
+
+/*
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
/*ARGSUSED*/
int
drm_gem_flink_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_gem_flink args;
	struct drm_gem_object *obj;
	int ret, handle;

	if (!(dev->driver->use_gem == 1))
		return (ENODEV);

	DRM_COPYFROM_WITH_RETURN(&args,
	    (void *)data, sizeof (args));
	/* Takes a reference on the object; dropped again at the bottom. */
	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return (EINVAL);
	handle = args.handle;
	spin_lock(&dev->object_name_lock);
	if (!obj->flink) {
		/* only create a node in object_name_idr, no update anything */
		ret = idr_list_get_new_above(&dev->object_name_idr,
		    obj, &handle);
		/*
		 * NOTE(review): ret is not checked here and is
		 * overwritten below; a failed insertion would leave
		 * obj->flink set without a matching name-table entry.
		 * Confirm whether that path can occur.
		 */
		obj->flink = obj->name;
		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	}
	/*
	 * Leave the reference from the lookup around as the
	 * name table now holds one
	 */
	args.name = obj->name;

	spin_unlock(&dev->object_name_lock);
	ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
	if (ret != 0)
		DRM_ERROR(" gem flink error! %d", ret);

	/* Drop the lookup reference taken above. */
	spin_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	return (ret);
}
+
+/*
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+/*ARGSUSED*/
+int
+drm_gem_open_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_gem_open args;
+ struct drm_gem_object *obj;
+ int ret;
+ int handle;
+
+ if (!(dev->driver->use_gem == 1)) {
+ DRM_ERROR("Not support GEM");
+ return (ENODEV);
+ }
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (void *) data, sizeof (args));
+
+ spin_lock(&dev->object_name_lock);
+
+ obj = idr_list_find(&dev->object_name_idr, args.name);
+
+ if (obj)
+ drm_gem_object_reference(obj);
+ spin_unlock(&dev->object_name_lock);
+ if (!obj) {
+ DRM_ERROR("Can't find the obj %d", args.name);
+ return (ENOENT);
+ }
+
+ ret = drm_gem_handle_create(fpriv, obj, &handle);
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+
+ args.handle = args.name;
+ args.size = obj->size;
+
+ ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
+ if (ret != 0)
+ DRM_ERROR(" gem open error! %d", ret);
+ return (ret);
+}
+
+/*
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
void
drm_gem_open(struct drm_file *file_private)
{
	/* Per-file handle table plus the lock guarding it. */
	idr_list_init(&file_private->object_idr);
	mutex_init(&file_private->table_lock, NULL, MUTEX_DRIVER, NULL);
}
+
+/*
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
static void
drm_gem_object_release_handle(struct drm_gem_object *obj)
{
	/* Drop the handle-count reference held by one per-file handle. */
	drm_gem_object_handle_unreference(obj);
}
+
+/*
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	struct idr_list *entry;
	spin_lock(&dev->struct_mutex);

	/* Drop the handle reference for every object this file holds. */
	idr_list_for_each(entry, &file_private->object_idr)
		drm_gem_object_release_handle(entry->obj);

	/* Then discard the per-file handle table itself. */
	idr_list_free(&file_private->object_idr);
	spin_unlock(&dev->struct_mutex);

}
+
+/*
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
void
drm_gem_object_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_local_map *map = obj->map;

	/* Give the driver first crack at tearing down private state. */
	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	/* Release the devmap cookie and local map built at alloc time. */
	gfxp_umem_cookie_destroy(map->drm_umem_cookie);
	drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);

	kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));

	/* Unwind the DMA binding and backing memory. */
	(void) ddi_dma_unbind_handle(obj->dma_hdl);
	ddi_dma_mem_free(&obj->acc_hdl);
	ddi_dma_free_handle(&obj->dma_hdl);

	/* Update device-wide accounting, then free the object itself. */
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	kmem_free(obj, sizeof (struct drm_gem_object));
}
+
+/*
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ int err;
+ struct drm_device *dev = obj->dev;
+ /* Remove any name for this object */
+ spin_lock(&dev->object_name_lock);
+ if (obj->flink) {
+ err = idr_list_remove(&dev->object_name_idr, obj->name);
+ if (err == -1)
+ DRM_ERROR("%s", __func__);
+ obj->flink = 0;
+ spin_unlock(&dev->object_name_lock);
+ /*
+ * The object name held a reference to this object, drop
+ * that now.
+ */
+ drm_gem_object_unreference(obj);
+ } else
+
+ spin_unlock(&dev->object_name_lock);
+
+}
diff --git a/usr/src/uts/common/io/drm/drm_irq.c b/usr/src/uts/common/io/drm/drm_irq.c
index 928cb63e58..3d3640a3bd 100644
--- a/usr/src/uts/common/io/drm/drm_irq.c
+++ b/usr/src/uts/common/io/drm/drm_irq.c
@@ -4,6 +4,7 @@
*/
/*
* Copyright 2003 Eric Anholt
+ * Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -119,6 +120,8 @@ drm_vblank_cleanup(struct drm_device *dev)
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->last_vblank, sizeof (u32) * dev->num_crtcs,
DRM_MEM_DRIVER);
+ drm_free(dev->vblank_inmodeset, sizeof (*dev->vblank_inmodeset) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
dev->num_crtcs = 0;
}
@@ -159,6 +162,12 @@ drm_vblank_init(struct drm_device *dev, int num_crtcs)
dev->last_vblank = drm_alloc(num_crtcs * sizeof (u32), DRM_MEM_DRIVER);
if (!dev->last_vblank)
goto err;
+
+ dev->vblank_inmodeset = drm_alloc(num_crtcs * sizeof (int),
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_inmodeset)
+ goto err;
+
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
DRM_INIT_WAITQUEUE(&dev->vbl_queues[i], DRM_INTR_PRI(dev));
@@ -380,6 +389,70 @@ drm_vblank_put(struct drm_device *dev, int crtc)
DRM_SPINUNLOCK(&dev->vbl_lock);
}
+/*
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets. If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
/*ARGSUSED*/
int
drm_modeset_ctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_modeset_ctl modeset;
	int crtc, ret = 0;

	/* If drm_vblank_init() hasn't been called yet, just no-op */
	if (!dev->num_crtcs)
		goto out;

	DRM_COPYFROM_WITH_RETURN(&modeset, (void *)data,
	    sizeof (modeset));

	crtc = modeset.crtc;
	/*
	 * NOTE(review): the error paths below return -EINVAL while the
	 * neighboring ioctls in this driver (e.g. drm_wait_vblank)
	 * return positive errno values -- confirm which convention the
	 * ioctl dispatcher expects.
	 */
	if (crtc >= dev->num_crtcs) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * To avoid all the problems that might happen if interrupts
	 * were enabled/disabled around or between these calls, we just
	 * have the kernel take a reference on the CRTC (just once though
	 * to avoid corrupting the count if multiple, mismatch calls occur),
	 * so that interrupts remain enabled in the interim.
	 */
	switch (modeset.cmd) {
	case _DRM_PRE_MODESET:
		/* First PRE_MODESET pins the vblank counter via a ref. */
		if (!dev->vblank_inmodeset[crtc]) {
			dev->vblank_inmodeset[crtc] = 1;
			ret = drm_vblank_get(dev, crtc);
		}
		break;
	case _DRM_POST_MODESET:
		/* Matching POST_MODESET releases that reference. */
		if (dev->vblank_inmodeset[crtc]) {
			DRM_SPINLOCK(&dev->vbl_lock);
			dev->vblank_disable_allowed = 1;
			dev->vblank_inmodeset[crtc] = 0;
			DRM_SPINUNLOCK(&dev->vbl_lock);
			drm_vblank_put(dev, crtc);
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	return (ret);
}
+
/*ARGSUSED*/
int
drm_wait_vblank(DRM_IOCTL_ARGS)
@@ -390,7 +463,7 @@ drm_wait_vblank(DRM_IOCTL_ARGS)
unsigned int sequence;
if (!dev->irq_enabled) {
- DRM_DEBUG("wait vblank, EINVAL");
+ DRM_ERROR("wait vblank, EINVAL");
return (EINVAL);
}
#ifdef _MULTI_DATAMODEL
@@ -411,19 +484,20 @@ drm_wait_vblank(DRM_IOCTL_ARGS)
if (vblwait.request.type &
~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
- cmn_err(CE_WARN, "drm_wait_vblank: wrong request type 0x%x",
+ DRM_ERROR("drm_wait_vblank: wrong request type 0x%x",
vblwait.request.type);
return (EINVAL);
}
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
- if (crtc >= dev->num_crtcs)
+ if (crtc >= dev->num_crtcs) {
+ DRM_ERROR("wait vblank operation not support");
return (ENOTSUP);
-
+ }
ret = drm_vblank_get(dev, crtc);
if (ret) {
- DRM_DEBUG("can't get drm vblank");
+ DRM_ERROR("can't get drm vblank %d", ret);
return (ret);
}
sequence = drm_vblank_count(dev, crtc);
@@ -449,7 +523,7 @@ drm_wait_vblank(DRM_IOCTL_ARGS)
/*
* Don't block process, send signal when vblank interrupt
*/
- DRM_DEBUG("NOT SUPPORT YET, SHOULD BE ADDED");
+ DRM_ERROR("NOT SUPPORT YET, SHOULD BE ADDED");
cmn_err(CE_WARN, "NOT SUPPORT YET, SHOULD BE ADDED");
ret = EINVAL;
goto done;
@@ -457,8 +531,9 @@ drm_wait_vblank(DRM_IOCTL_ARGS)
/* block until vblank interupt */
/* shared code returns -errno */
DRM_WAIT_ON(ret, &dev->vbl_queues[crtc], 3 * DRM_HZ,
- ((drm_vblank_count(dev, crtc)
- - vblwait.request.sequence) <= (1 << 23)));
+ (((drm_vblank_count(dev, crtc)
+ - vblwait.request.sequence) <= (1 << 23)) ||
+ !dev->irq_enabled));
if (ret != EINTR) {
struct timeval now;
(void) uniqtime(&now);
@@ -503,33 +578,4 @@ drm_handle_vblank(struct drm_device *dev, int crtc)
{
atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queues[crtc]);
- drm_vbl_send_signals(dev);
-}
-
-/*
- * Schedule a tasklet to call back a driver hook with the HW lock held.
- *
- * \param dev DRM device.
- * \param func Driver callback.
- *
- * This is intended for triggering actions that require the HW lock from an
- * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
- * completes. Note that the callback may be called from interrupt or process
- * context, it must not make any assumptions about this. Also, the HW lock will
- * be held with the kernel context or any client context.
- */
-
-void
-drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t *))
-{
- mutex_enter(&dev->tasklet_lock);
-
- if (dev->locked_tasklet_func) {
- mutex_exit(&dev->tasklet_lock);
- return;
- }
-
- dev->locked_tasklet_func = func;
-
- mutex_exit(&dev->tasklet_lock);
}
diff --git a/usr/src/uts/common/io/drm/drm_linux_list.h b/usr/src/uts/common/io/drm/drm_linux_list.h
index e811c9feba..02a4809bca 100644
--- a/usr/src/uts/common/io/drm/drm_linux_list.h
+++ b/usr/src/uts/common/io/drm/drm_linux_list.h
@@ -5,6 +5,7 @@
/*
* -
* Copyright 2003 Eric Anholt
+ * Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,33 +32,48 @@
*
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
#ifndef _DRM_LINUX_LIST_H_
#define _DRM_LINUX_LIST_H_
struct list_head {
struct list_head *next, *prev;
+ caddr_t contain_ptr;
};
/* Cheat, assume the list_head is at the start of the struct */
-#define list_entry(entry, type, member) (type *)(entry)
+#define list_entry(entry, type, member) (type *)(uintptr_t)(entry->contain_ptr)
#define INIT_LIST_HEAD(head) { \
(head)->next = head; \
(head)->prev = head; \
+ (head)->contain_ptr = (caddr_t)head; \
}
-#define list_add_tail(entry, head) { \
+#define list_add(entry, head, con_ptr) { \
+ (head)->next->prev = entry; \
+ (entry)->next = (head)->next; \
+ (entry)->prev = head; \
+ (head)->next = entry; \
+ (entry)->contain_ptr = con_ptr; \
+}
+
+#define list_add_tail(entry, head, con_ptr) { \
(entry)->prev = (head)->prev; \
(entry)->next = head; \
(head)->prev->next = entry; \
(head)->prev = entry; \
+ (entry)->contain_ptr = con_ptr; \
}
#define list_del(entry) { \
(entry)->next->prev = (entry)->prev; \
(entry)->prev->next = (entry)->next; \
+ (entry)->contain_ptr = NULL; \
}
#define list_for_each(entry, head) \
@@ -65,7 +81,19 @@ struct list_head {
#define list_for_each_safe(entry, temp, head) \
for (entry = (head)->next, temp = (entry)->next; \
- temp != head; \
+ entry != head; \
entry = temp, temp = temp->next)
+#define list_del_init(entry) { \
+ list_del(entry); \
+ INIT_LIST_HEAD(entry); \
+}
+
+#define list_move_tail(entry, head, con_ptr) { \
+ list_del(entry); \
+ list_add_tail(entry, head, con_ptr); \
+}
+
+#define list_empty(head) ((head)->next == head)
+
#endif /* _DRM_LINUX_LIST_H_ */
diff --git a/usr/src/uts/common/io/drm/drm_memory.c b/usr/src/uts/common/io/drm/drm_memory.c
index 153841f300..cf2d5f6d9d 100644
--- a/usr/src/uts/common/io/drm/drm_memory.c
+++ b/usr/src/uts/common/io/drm/drm_memory.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -10,6 +10,7 @@
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -37,8 +38,6 @@
*
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
/* Device memory access structure */
@@ -135,6 +134,7 @@ drm_get_pci_index_reg(dev_info_t *devi, uint_t physical, uint_t size,
break;
}
}
+
kmem_free(regs, (size_t)length);
return (regnum);
error:
@@ -204,8 +204,8 @@ drm_ioremap(drm_device_t *softstate, drm_local_map_t *map)
map->dev_addr = iomap.drm_base;
DRM_DEBUG(
- "map->handle is %p map->dev_addr is %lx",
- (void *)map->handle, (unsigned long)map->dev_addr);
+ "map->handle is %p map->dev_addr is %lx map->size %x",
+ (void *)map->handle, (unsigned long)map->dev_addr, map->size);
return (0);
}
diff --git a/usr/src/uts/common/io/drm/drm_mm.c b/usr/src/uts/common/io/drm/drm_mm.c
new file mode 100644
index 0000000000..d2d70c4cba
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_mm.c
@@ -0,0 +1,336 @@
+/*
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files(the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice(including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple and there might be substantial
+ * performance gains if a smarter free list is implemented.
+ * Currently it is just an
+ * unordered stack of free regions. This could easily be improved if an RB-tree
+ * is used instead. At least if we expect heavy fragmentation.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+
+unsigned long
+drm_mm_tail_space(struct drm_mm *mm)
+{
+ struct list_head *tail_node;
+ struct drm_mm_node *entry;
+
+ tail_node = mm->ml_entry.prev;
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ if (!entry->free)
+ return (0);
+
+ return (entry->size);
+}
+
+int
+drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+{
+ struct list_head *tail_node;
+ struct drm_mm_node *entry;
+
+ tail_node = mm->ml_entry.prev;
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ if (!entry->free)
+ return (ENOMEM);
+
+ if (entry->size <= size)
+ return (ENOMEM);
+
+ entry->size -= size;
+ return (0);
+}
+
+
+static int
+drm_mm_create_tail_node(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size)
+{
+ struct drm_mm_node *child;
+
+ child = (struct drm_mm_node *)
+ drm_alloc(sizeof (*child), DRM_MEM_MM);
+ if (!child)
+ return (ENOMEM);
+
+ child->free = 1;
+ child->size = size;
+ child->start = start;
+ child->mm = mm;
+
+ list_add_tail(&child->ml_entry, &mm->ml_entry, (caddr_t)child);
+ list_add_tail(&child->fl_entry, &mm->fl_entry, (caddr_t)child);
+
+ return (0);
+}
+
+
+int
+drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+{
+ struct list_head *tail_node;
+ struct drm_mm_node *entry;
+
+ tail_node = mm->ml_entry.prev;
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ if (!entry->free) {
+ return (drm_mm_create_tail_node(mm,
+ entry->start + entry->size, size));
+ }
+ entry->size += size;
+ return (0);
+}
+
+static struct drm_mm_node *
+drm_mm_split_at_start(struct drm_mm_node *parent,
+ unsigned long size)
+{
+ struct drm_mm_node *child;
+
+ child = (struct drm_mm_node *)
+ drm_alloc(sizeof (*child), DRM_MEM_MM);
+ if (!child)
+ return (NULL);
+
+ INIT_LIST_HEAD(&child->fl_entry);
+
+ child->free = 0;
+ child->size = size;
+ child->start = parent->start;
+ child->mm = parent->mm;
+
+ list_add_tail(&child->ml_entry, &parent->ml_entry, (caddr_t)child);
+ INIT_LIST_HEAD(&child->fl_entry);
+
+ parent->size -= size;
+ parent->start += size;
+ return (child);
+}
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
void
drm_mm_put_block(struct drm_mm_node *cur)
{

	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	/* Coalesce with the preceding block if it is free. */
	if (cur_head->prev != root_head) {
		prev_node = list_entry(cur_head->prev,
		    struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	/* And with the following block, if that is free too. */
	if (cur_head->next != root_head) {
		next_node = list_entry(cur_head->next,
		    struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* prev absorbed cur; now absorb next. */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				drm_free(next_node,
				    sizeof (*next_node), DRM_MEM_MM);
			} else {
				/* next grows backwards over cur's span. */
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		/* No free neighbor: push cur itself onto the free stack. */
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry, (caddr_t)cur);
	} else {
		/* cur's span now lives in a neighbor; discard the node. */
		list_del(&cur->ml_entry);
		drm_free(cur, sizeof (*cur), DRM_MEM_MM);
	}
}
+
struct drm_mm_node *
drm_mm_get_block(struct drm_mm_node *parent,
    unsigned long size,
    unsigned alignment)
{

	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	/* Split off the misaligned head so the allocation starts aligned. */
	if (tmp) {
		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
		if (!align_splitoff)
			return (NULL);
	}

	if (parent->size == size) {
		/* Exact fit: convert the free block in place. */
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		return (parent);
	} else {
		/*
		 * NOTE(review): drm_mm_split_at_start() can return NULL
		 * on allocation failure, so callers must tolerate a
		 * NULL result from this function.
		 */
		child = drm_mm_split_at_start(parent, size);
	}

	/* Return any alignment splinter to the free pool. */
	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return (child);
}
+
+struct drm_mm_node *
+drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ int best_match)
+{
+ struct list_head *list;
+ const struct list_head *free_stack = &mm->fl_entry;
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+ unsigned wasted;
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each(list, free_stack) {
+ entry = list_entry(list, struct drm_mm_node, fl_entry);
+ wasted = 0;
+
+ if (entry->size < size)
+ continue;
+
+ if (alignment) {
+ register unsigned tmp = entry->start % alignment;
+ if (tmp)
+ wasted += alignment - tmp;
+ }
+
+
+ if (entry->size >= size + wasted) {
+ if (!best_match)
+ return (entry);
+ if (size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+ }
+
+ return (best);
+}
+
/*
 * Return nonzero when the manager holds exactly one node (the single
 * free block created at init), i.e. no allocations are outstanding.
 */
int
drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
+
/*
 * Initialize a memory-range manager covering [start, start + size):
 * empty both lists, then seed them with one all-free node.
 */
int
drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);

	return (drm_mm_create_tail_node(mm, start, size));
}
+
+
void
drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	/*
	 * A clean manager holds exactly one node, present on both
	 * lists; anything else means allocations are still
	 * outstanding, so refuse to tear down.
	 */
	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);

	drm_free(entry, sizeof (*entry), DRM_MEM_MM);
}
+
+void
+drm_mm_clean_ml(const struct drm_mm *mm)
+{
+ const struct list_head *mlstack = &mm->ml_entry;
+ struct list_head *list, *temp;
+ struct drm_mm_node *entry;
+
+ if (mlstack->next == NULL)
+ return;
+
+ list_for_each_safe(list, temp, mlstack) {
+ entry = list_entry(list, struct drm_mm_node, ml_entry);
+ DRM_DEBUG("ml_entry 0x%x, size 0x%x, start 0x%x",
+ entry, entry->size, entry->start);
+
+ list_del(&entry->fl_entry);
+ list_del(&entry->ml_entry);
+ drm_free(entry, sizeof (*entry), DRM_MEM_MM);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_sunmod.c b/usr/src/uts/common/io/drm/drm_sunmod.c
index a1ecff7f83..1cd39d29c4 100644
--- a/usr/src/uts/common/io/drm/drm_sunmod.c
+++ b/usr/src/uts/common/io/drm/drm_sunmod.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -98,7 +98,6 @@ struct cb_ops drm_cb_ops = {
D_NEW | D_MTSAFE |D_DEVMAP /* cb_flag */
};
-
int
_init(void)
{
@@ -499,6 +498,9 @@ drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
((ioctl->flags & DRM_MASTER) && !fpriv->master))
return (EACCES);
+ fpriv->dev = dev;
+ fpriv->credp = credp;
+
retval = func(dp, arg, fpriv, mode);
return (retval);
@@ -514,7 +516,7 @@ drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
drm_inst_state_t *mstate;
drm_device_t *dp;
ddi_umem_cookie_t cookie;
- drm_local_map_t *map;
+ drm_local_map_t *map = NULL;
unsigned long aperbase;
u_offset_t handle;
offset_t koff;
@@ -528,6 +530,11 @@ drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
DDI_NEVERSWAP_ACC,
DDI_STRICTORDER_ACC,
};
+ static ddi_device_acc_attr_t gem_dev_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_MERGING_OK_ACC
+ };
mstate = drm_sup_devt_to_state(dev);
if (mstate == NULL)
@@ -560,29 +567,55 @@ drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
return (EINVAL);
}
- /*
- * We will solve 32-bit application on 64-bit kernel
- * issue later, now, we just use low 32-bit
- */
- handle = (u_offset_t)offset;
- handle &= 0xffffffff;
mutex_enter(&dp->dev_lock);
- TAILQ_FOREACH(map, &dp->maplist, link) {
- if (handle ==
- ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
- break;
+
+ if (dp->driver->use_gem == 1) {
+ struct idr_list *entry;
+ drm_cminor_t *mp;
+
+ mp = drm_find_file_by_minor(dp, minor);
+ if (!mp) {
+ mutex_exit(&dp->dev_lock);
+ DRM_ERROR("drm_sun_devmap: can't find authenticator");
+ return (EACCES);
+ }
+
+ spin_lock(&dp->struct_mutex);
+ idr_list_for_each(entry, &(mp->fpriv->object_idr)) {
+ if ((uintptr_t)entry->obj == (u_offset_t)offset) {
+ map = entry->obj->map;
+ goto goon;
+ }
+ }
+goon:
+ spin_unlock(&dp->struct_mutex);
}
- /*
- * Temporarily, because offset is phys_addr for register
- * and framebuffer, is kernel virtual_addr for others
- * Maybe we will use hash table to solve this issue later.
- */
if (map == NULL) {
+ /*
+ * We will solve 32-bit application on 64-bit kernel
+ * issue later, now, we just use low 32-bit
+ */
+ handle = (u_offset_t)offset;
+ handle &= 0xffffffff;
+
TAILQ_FOREACH(map, &dp->maplist, link) {
- if (handle == (map->offset & 0xffffffff))
+ if (handle ==
+ ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
break;
}
+
+ /*
+ * Temporarily, because offset is phys_addr for register
+ * and framebuffer, is kernel virtual_addr for others
+ * Maybe we will use hash table to solve this issue later.
+ */
+ if (map == NULL) {
+ TAILQ_FOREACH(map, &dp->maplist, link) {
+ if (handle == (map->offset & 0xffffffff))
+ break;
+ }
+ }
}
if (map == NULL) {
@@ -704,6 +737,20 @@ drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
*maplen = length;
break;
+ case _DRM_TTM:
+ if (map->drm_umem_cookie == NULL)
+ return (EINVAL);
+
+ if (gfxp_devmap_umem_setup(dhp, dp->dip,
+ NULL, map->drm_umem_cookie, 0, map->size, PROT_ALL,
+ IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP,
+ &gem_dev_attr)) {
+ cmn_err(CE_WARN, "devmap:failed, retval=%d", ret);
+ return (DDI_FAILURE);
+ }
+ *maplen = map->size;
+ return (DDI_SUCCESS);
+
default:
return (DDI_FAILURE);
}
diff --git a/usr/src/uts/common/sys/agp/agpdefs.h b/usr/src/uts/common/sys/agp/agpdefs.h
index 507953f83c..5ed32cb122 100644
--- a/usr/src/uts/common/sys/agp/agpdefs.h
+++ b/usr/src/uts/common/sys/agp/agpdefs.h
@@ -20,6 +20,11 @@
*/
/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -103,6 +108,7 @@ extern "C" {
#define INTEL_BR_Q45 0x2e108086
#define INTEL_BR_G45 0x2e208086
#define INTEL_BR_G41 0x2e308086
+#define INTEL_BR_B43 0x2e408086
/* AGP common register offset in pci configuration space */
#define AGP_CONF_MISC 0x51 /* one byte */
@@ -172,6 +178,36 @@ extern "C" {
#define INTEL_IGD_Q45 0x2e128086
#define INTEL_IGD_G45 0x2e228086
#define INTEL_IGD_G41 0x2e328086
+#define INTEL_IGD_B43 0x2e428086
+
+/* Intel 915 and 945 series */
+#define IS_INTEL_915(device) ((device == INTEL_IGD_915) || \
+ (device == INTEL_IGD_915GM) || \
+ (device == INTEL_IGD_945) || \
+ (device == INTEL_IGD_945GM) || \
+ (device == INTEL_IGD_945GME))
+
+/* Intel 965 series */
+#define IS_INTEL_965(device) ((device == INTEL_IGD_946GZ) || \
+ (device == INTEL_IGD_965G1) || \
+ (device == INTEL_IGD_965Q) || \
+ (device == INTEL_IGD_965G2) || \
+ (device == INTEL_IGD_965GM) || \
+ (device == INTEL_IGD_965GME) || \
+ (device == INTEL_IGD_GM45) || \
+ IS_INTEL_G4X(device))
+
+/* Intel G33 series */
+#define IS_INTEL_X33(device) ((device == INTEL_IGD_Q35) || \
+ (device == INTEL_IGD_G33) || \
+ (device == INTEL_IGD_Q33))
+
+/* Intel G4X series */
+#define IS_INTEL_G4X(device) ((device == INTEL_IGD_EL) || \
+ (device == INTEL_IGD_Q45) || \
+ (device == INTEL_IGD_G45) || \
+ (device == INTEL_IGD_G41) || \
+ (device == INTEL_IGD_B43))
/* register offsets in PCI config space */
#define I8XX_CONF_GMADR 0x10 /* GMADR of i8xx series */
@@ -296,6 +332,7 @@ extern "C" {
#define GIGA_MASK 0xC0000000
#define UI32_MASK 0xffffffffU
#define MAXAPERMEGAS 0x1000 /* Aper size no more than 4G */
+#define MINAPERMEGAS 192
#endif /* _KERNEL */
diff --git a/usr/src/uts/common/sys/agp/agpgart_impl.h b/usr/src/uts/common/sys/agp/agpgart_impl.h
index 997e0da948..a8f5b6bda6 100644
--- a/usr/src/uts/common/sys/agp/agpgart_impl.h
+++ b/usr/src/uts/common/sys/agp/agpgart_impl.h
@@ -1,13 +1,16 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_AGPGART_IMPL_H
#define _SYS_AGPGART_IMPL_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -123,6 +126,12 @@ typedef struct _agp_info32 {
} agp_info32_t;
#endif /* _MULTI_DATAMODEL */
+/*
+ * Doubly-linked circular list head (Linux list.h style), augmented with
+ * a per-node GTT segment pointer used for the mapped_list below.
+ *
+ * NOTE(review): embedding the gttseg payload directly in the list-head
+ * type (rather than in a containing struct) is unconventional, and this
+ * tag may shadow other list_head declarations in drm headers — confirm
+ * no conflicting definition is visible to the same translation units.
+ */
+struct list_head {
+	struct list_head *next, *prev;
+	struct igd_gtt_seg *gttseg;
+};
+
+
typedef struct agpgart_softstate {
dev_info_t *asoft_dip;
kmutex_t asoft_instmutex;
@@ -147,6 +156,7 @@ typedef struct agpgart_softstate {
/* all registered agp device in here */
agp_registered_dev_t asoft_devreg;
kstat_t *asoft_ksp;
+ struct list_head mapped_list;
} agpgart_softstate_t;
typedef struct agpgart_ctx {
diff --git a/usr/src/uts/common/sys/agp/agpmaster_io.h b/usr/src/uts/common/sys/agp/agpmaster_io.h
index c608c9a647..1202e76a6c 100644
--- a/usr/src/uts/common/sys/agp/agpmaster_io.h
+++ b/usr/src/uts/common/sys/agp/agpmaster_io.h
@@ -20,15 +20,18 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_AGPMASTER_IO_H
#define _SYS_AGPMASTER_IO_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/usr/src/uts/common/sys/agp/agptarget_io.h b/usr/src/uts/common/sys/agp/agptarget_io.h
index 75c21dbaf7..316e2aba84 100644
--- a/usr/src/uts/common/sys/agp/agptarget_io.h
+++ b/usr/src/uts/common/sys/agp/agptarget_io.h
@@ -1,13 +1,11 @@
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_AGPTARGET_IO_H
#define _SYS_AGPTARGET_IO_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -27,6 +25,9 @@ extern "C" {
#define AGP_TARGET_FLUSH_GTLB _IO(AGPTARGETIOC_BASE, 35)
#define AGP_TARGET_CONFIGURE _IO(AGPTARGETIOC_BASE, 36)
#define AGP_TARGET_UNCONFIG _IO(AGPTARGETIOC_BASE, 37)
+#define INTEL_CHIPSET_FLUSH_SETUP _IO(AGPTARGETIOC_BASE, 38)
+#define INTEL_CHIPSET_FLUSH _IO(AGPTARGETIOC_BASE, 39)
+#define INTEL_CHIPSET_FLUSH_FREE _IO(AGPTARGETIOC_BASE, 40)
/* Internal agp info struct */
typedef struct _i_agp_info {
diff --git a/usr/src/uts/common/sys/agpgart.h b/usr/src/uts/common/sys/agpgart.h
index 2798f3d306..c8770aea88 100644
--- a/usr/src/uts/common/sys/agpgart.h
+++ b/usr/src/uts/common/sys/agpgart.h
@@ -1,10 +1,11 @@
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2000 Doug Rabson
+ * Copyright (c) 2009, Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,8 +34,6 @@
#ifndef _SYS_AGPGART_H
#define _SYS_AGPGART_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -69,6 +68,18 @@ typedef struct _agp_allocate {
uint32_t agpa_physical; /* for i810 only, private */
} agp_allocate_t;
+typedef struct _agp_bind_pages {
+ uint32_t agpb_pgstart;
+ pfn_t *agpb_pages;
+ unsigned long agpb_pgcount;
+} agp_bind_pages_t;
+
+typedef struct _agp_unbind_pages {
+ uint32_t agpb_pgstart;
+ unsigned long agpb_pgcount;
+ uint32_t agpb_type;
+} agp_unbind_pages_t;
+
typedef struct _agp_bind {
int32_t agpb_key;
uint32_t agpb_pgstart;
@@ -92,6 +103,10 @@ typedef struct _agp_unbind {
#define AGPIOC_IOREMAP_FREE _IO(AGPIOC_BASE, 9)
#define AGPIOC_READ _IO(AGPIOC_BASE, 10)
#define AGPIOC_WRITE _IO(AGPIOC_BASE, 11)
+#define AGPIOC_FLUSHCHIPSET _IO(AGPIOC_BASE, 12)
+#define AGPIOC_PAGES_BIND _IOW(AGPIOC_BASE, 13, agp_bind_pages_t)
+#define AGPIOC_PAGES_UNBIND _IOW(AGPIOC_BASE, 14, agp_unbind_pages_t)
+#define AGPIOC_PAGES_REBIND _IO(AGPIOC_BASE, 15)
/* AGP status register bits definition */
#define AGPSTAT_RQ_MASK 0xff000000 /* target only */