author     Gordon Ross <gordon.w.ross@gmail.com>  2016-10-09 12:44:00 -0400
committer  Gordon Ross <gordon.w.ross@gmail.com>  2016-11-19 00:14:18 -0500
commit     829150b0348ae0641087b9046fce9472cc63f9a4 (patch)
tree       2851e4574a890df9a128bab04d7188ed1b747669
parent     959518b561cad2acd3fbb15387db85b614d41262 (diff)
download   illumos-gfx-drm-829150b0348ae0641087b9046fce9472cc63f9a4.tar.gz
Import DRM and related code from illumos-gate as of
commit 52c3bda44c39c1301008f3db120ba03a31e61931
-rw-r--r--  usr/src/cmd/devfsadm/Makefile  70
-rw-r--r--  usr/src/cmd/devfsadm/Makefile.com  223
-rw-r--r--  usr/src/cmd/devfsadm/devfsadm.h  255
-rw-r--r--  usr/src/cmd/devfsadm/i386/Makefile  32
-rw-r--r--  usr/src/cmd/devfsadm/i386/misc_link_i386.c  656
-rw-r--r--  usr/src/man/man7i/agpgart_io.7i  926
-rw-r--r--  usr/src/pkg/manifests/driver-graphics-agpgart.mf  92
-rw-r--r--  usr/src/pkg/manifests/driver-graphics-drm.mf  83
-rw-r--r--  usr/src/pkg/manifests/system-header-header-agp.mf  47
-rw-r--r--  usr/src/uts/common/io/drm/THIRDPARTYLICENSE  314
-rw-r--r--  usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip  1
-rw-r--r--  usr/src/uts/common/io/drm/ati_pcigart.c  128
-rw-r--r--  usr/src/uts/common/io/drm/drm.h  865
-rw-r--r--  usr/src/uts/common/io/drm/drmP.h  1103
-rw-r--r--  usr/src/uts/common/io/drm/drm_agpsupport.c  587
-rw-r--r--  usr/src/uts/common/io/drm/drm_atomic.h  94
-rw-r--r--  usr/src/uts/common/io/drm/drm_auth.c  173
-rw-r--r--  usr/src/uts/common/io/drm/drm_bufs.c  897
-rw-r--r--  usr/src/uts/common/io/drm/drm_cache.c  66
-rw-r--r--  usr/src/uts/common/io/drm/drm_context.c  447
-rw-r--r--  usr/src/uts/common/io/drm/drm_dma.c  157
-rw-r--r--  usr/src/uts/common/io/drm/drm_drawable.c  74
-rw-r--r--  usr/src/uts/common/io/drm/drm_drv.c  577
-rw-r--r--  usr/src/uts/common/io/drm/drm_fops.c  131
-rw-r--r--  usr/src/uts/common/io/drm/drm_gem.c  698
-rw-r--r--  usr/src/uts/common/io/drm/drm_io32.h  187
-rw-r--r--  usr/src/uts/common/io/drm/drm_ioctl.c  424
-rw-r--r--  usr/src/uts/common/io/drm/drm_irq.c  581
-rw-r--r--  usr/src/uts/common/io/drm/drm_kstat.c  99
-rw-r--r--  usr/src/uts/common/io/drm/drm_linux_list.h  99
-rw-r--r--  usr/src/uts/common/io/drm/drm_lock.c  190
-rw-r--r--  usr/src/uts/common/io/drm/drm_memory.c  221
-rw-r--r--  usr/src/uts/common/io/drm/drm_mm.c  336
-rw-r--r--  usr/src/uts/common/io/drm/drm_msg.c  59
-rw-r--r--  usr/src/uts/common/io/drm/drm_pci.c  349
-rw-r--r--  usr/src/uts/common/io/drm/drm_sarea.h  81
-rw-r--r--  usr/src/uts/common/io/drm/drm_scatter.c  186
-rw-r--r--  usr/src/uts/common/io/drm/drm_stub.c  526
-rw-r--r--  usr/src/uts/common/io/drm/drm_sunmod.c  1010
-rw-r--r--  usr/src/uts/common/io/drm/drm_sunmod.h  160
-rw-r--r--  usr/src/uts/common/io/drm/queue.h  585
-rw-r--r--  usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart  23
-rw-r--r--  usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart.descrip  1
-rw-r--r--  usr/src/uts/common/sys/agp/agpamd64gart_io.h  40
-rw-r--r--  usr/src/uts/common/sys/agp/agpdefs.h  354
-rw-r--r--  usr/src/uts/common/sys/agp/agpgart_impl.h  179
-rw-r--r--  usr/src/uts/common/sys/agp/agpmaster_io.h  100
-rw-r--r--  usr/src/uts/common/sys/agp/agptarget_io.h  48
-rw-r--r--  usr/src/uts/common/sys/agpgart.h  148
-rw-r--r--  usr/src/uts/common/sys/gfx_private.h  103
-rw-r--r--  usr/src/uts/intel/agpgart/Makefile  73
-rw-r--r--  usr/src/uts/intel/agpmaster/Makefile  97
-rw-r--r--  usr/src/uts/intel/agptarget/Makefile  67
-rw-r--r--  usr/src/uts/intel/amd64_gart/Makefile  62
-rw-r--r--  usr/src/uts/intel/drm/Makefile  90
-rw-r--r--  usr/src/uts/intel/i915/Makefile  93
-rw-r--r--  usr/src/uts/intel/io/agpgart/agp_kstat.c  138
-rw-r--r--  usr/src/uts/intel/io/agpgart/agpgart.c  3529
-rw-r--r--  usr/src/uts/intel/io/agpgart/agpgart.conf  8
-rw-r--r--  usr/src/uts/intel/io/agpgart/agptarget.c  953
-rw-r--r--  usr/src/uts/intel/io/agpgart/amd64_gart.c  439
-rw-r--r--  usr/src/uts/intel/io/agpmaster/agpmaster.c  732
-rw-r--r--  usr/src/uts/intel/io/drm/drm_pciids.h  219
-rw-r--r--  usr/src/uts/intel/io/drm/i915_dma.c  1146
-rw-r--r--  usr/src/uts/intel/io/drm/i915_drm.h  742
-rw-r--r--  usr/src/uts/intel/io/drm/i915_drv.c  1047
-rw-r--r--  usr/src/uts/intel/io/drm/i915_drv.h  1842
-rw-r--r--  usr/src/uts/intel/io/drm/i915_gem.c  2919
-rw-r--r--  usr/src/uts/intel/io/drm/i915_gem_debug.c  1108
-rw-r--r--  usr/src/uts/intel/io/drm/i915_gem_tiling.c  390
-rw-r--r--  usr/src/uts/intel/io/drm/i915_irq.c  1052
-rw-r--r--  usr/src/uts/intel/io/drm/i915_mem.c  425
-rw-r--r--  usr/src/uts/intel/io/drm/r300_cmdbuf.c  987
-rw-r--r--  usr/src/uts/intel/io/drm/r300_reg.h  1516
-rw-r--r--  usr/src/uts/intel/io/drm/radeon_cp.c  2387
-rw-r--r--  usr/src/uts/intel/io/drm/radeon_drm.h  800
-rw-r--r--  usr/src/uts/intel/io/drm/radeon_drv.c  298
-rw-r--r--  usr/src/uts/intel/io/drm/radeon_drv.h  1203
-rw-r--r--  usr/src/uts/intel/io/drm/radeon_io32.h  173
-rw-r--r--  usr/src/uts/intel/io/drm/radeon_irq.c  375
-rw-r--r--  usr/src/uts/intel/io/drm/radeon_mem.c  353
-rw-r--r--  usr/src/uts/intel/io/drm/radeon_state.c  3530
-rw-r--r--  usr/src/uts/intel/radeon/Makefile  91
83 files changed, 43669 insertions(+), 0 deletions(-)
diff --git a/usr/src/cmd/devfsadm/Makefile b/usr/src/cmd/devfsadm/Makefile
new file mode 100644
index 0000000..e008e8f
--- /dev/null
+++ b/usr/src/cmd/devfsadm/Makefile
@@ -0,0 +1,70 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# cmd/devfsadm/Makefile
+#
+
+DEFAULTFILES = devfsadm.dfl
+
+ETCDEVFILES=reserved_devnames
+
+include ../Makefile.cmd
+
+#
+# One for each ISA.
+#
+SUBDIRS = $(MACH)
+
+all := TARGET= all
+install := TARGET= install
+clean := TARGET= clean
+clobber := TARGET= clobber
+_msg := TARGET= _msg
+lint := TARGET= lint
+
+ROOTETCDEV= $(ROOTETC)/dev
+ROOTETCDEVFILES=$(ETCDEVFILES:%=$(ROOTETCDEV)/%)
+$(ROOTETCDEV) := DIRMODE= 755
+$(ROOTETCDEVFILES) := FILEMODE = 0644
+
+.KEEP_STATE:
+
+all: $(SUBDIRS) $(ETCDEVFILES)
+
+clean lint _msg: $(SUBDIRS)
+
+clobber: $(SUBDIRS)
+ $(RM) $(ROOTETCDEVFILES)
+
+install: $(SUBDIRS) $(ROOTETCDEFAULTFILES) $(ROOTETCDEVFILES)
+
+$(ROOTETCDEV):
+ $(INS.dir)
+
+$(ROOTETCDEV)/% : % $(ROOTETCDEV)
+ $(INS.file)
+
+$(SUBDIRS): FRC
+ @cd $@; pwd; $(MAKE) $(TARGET)
+
+FRC:
diff --git a/usr/src/cmd/devfsadm/Makefile.com b/usr/src/cmd/devfsadm/Makefile.com
new file mode 100644
index 0000000..4df3b00
--- /dev/null
+++ b/usr/src/cmd/devfsadm/Makefile.com
@@ -0,0 +1,223 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+# This target builds both a command (daemon) and various shared objects. This
+# isn't a typical target, and the inclusion of both library and command
+# Makefiles was probably not in their original design. However, there doesn't
+# presently seem to be a clash of any required definitions.
+include ../../../lib/Makefile.lib
+include ../../Makefile.cmd
+
+COMMON = ..
+UTSBASE = $(COMMON)/../../uts
+
+DEVFSADM_MOD = devfsadm
+
+DEVALLOCSRC = devalloc.c
+
+PLCYSRC = devpolicy.c plcysubr.c
+
+MODLOADDIR = $(COMMON)/../modload
+
+DEVFSADM_SRC = $(COMMON)/$(DEVFSADM_MOD:%=%.c) \
+ $(DEVALLOCSRC:%=$(COMMON)/%) $(PLCYSRC:%=$(COMMON)/%)
+DEVFSADM_OBJ = $(DEVFSADM_MOD:%=%.o) $(DEVALLOCSRC:%.c=%.o) $(PLCYSRC:%.c=%.o)
+
+DEVFSADM_DAEMON = devfsadmd
+
+LINKMOD_DIR = linkmod
+DEVFSADM_DIR = devfsadm
+
+CLOBBERFILES = $(MODS) $(DEVLINKTAB) $(DEVFSCOMPATLINKS) $(DEVFSADM_DAEMON)
+CLOBBERFILES += $(POFILE) $(POFILES) ../plcysubr.c
+
+LINK_OBJS_CMN = \
+ disk_link.o \
+ ieee1394_link.o \
+ dcam1394_link.o \
+ tape_link.o \
+ usb_link.o \
+ port_link.o \
+ audio_link.o \
+ cfg_link.o \
+ misc_link.o \
+ lofi_link.o \
+ ramdisk_link.o \
+ fssnap_link.o \
+ sgen_link.o \
+ smp_link.o \
+ md_link.o \
+ dtrace_link.o \
+ vscan_link.o \
+ zfs_link.o \
+ zut_link.o
+
+LINK_OBJS = $(LINK_OBJS_CMN) \
+ $(LINK_OBJS_$(MACH))
+
+LINK_SRCS = $(LINK_OBJS_CMN:%.o=$(COMMON)/%.c) \
+ $(LINK_OBJS_$(MACH):%.o=%.c)
+
+LINT_MODULES = $(LINK_SRCS:%.c=%.ln)
+
+LINK_MODS = $(LINK_OBJS:%.o=SUNW_%.so)
+
+DEVLINKTAB = devlink.tab
+DEVLINKTAB_SRC = $(COMMON)/$(DEVLINKTAB).sh
+
+COMPAT_LINKS = disks tapes ports audlinks devlinks drvconfig
+
+CPPFLAGS += -D_POSIX_PTHREAD_SEMANTICS -D_REENTRANT \
+ -I$(COMMON) -I$(UTSBASE)/common -I$(MODLOADDIR)
+CFLAGS += $(CCVERBOSE) $(C_PICFLAGS)
+
+LINTFLAGS += -erroff=E_NAME_USED_NOT_DEF2
+LINTFLAGS += -erroff=E_NAME_DEF_NOT_USED2
+LINTFLAGS += -erroff=E_NAME_MULTIPLY_DEF2
+
+CERRWARN += -_gcc=-Wno-uninitialized
+CERRWARN += -_gcc=-Wno-char-subscripts
+CERRWARN += -_gcc=-Wno-parentheses
+
+# Define the dependencies required by devfsadm and all shared objects.
+LDLIBS += -ldevinfo
+devfsadm := LDLIBS += -lgen -lsysevent -lnvpair -lzonecfg -lbsm
+SUNW_md_link.so := LDLIBS += -lmeta
+SUNW_disk_link.so := LDLIBS += -ldevid
+SUNW_sgen_link.so := LDLIBS += -ldevid
+
+# All libraries are built from the same SUNW_%.so rule (see below), and define
+# their own SONAME using -h explicitly. Null the generic -h macro that gets
+# inherited from Makefile.lib, otherwise we'll get two -h definitions.
+HSONAME =
+
+SRCS = $(DEVFSADM_SRC) $(LINK_SRCS)
+OBJS = $(DEVFSADM_OBJ) $(LINK_OBJS)
+MODS = $(DEVFSADM_MOD) $(LINK_MODS)
+
+POFILES = $(LINK_SRCS:.c=.po) $(DEVFSADM_SRC:.c=.po)
+POFILE = pdevfsadm.po
+
+# install specifics
+
+ROOTLIB_DEVFSADM = $(ROOTLIB)/$(DEVFSADM_DIR)
+ROOTLIB_DEVFSADM_LINKMOD = $(ROOTLIB_DEVFSADM)/$(LINKMOD_DIR)
+
+ROOTLIB_DEVFSADM_LINK_MODS = $(LINK_MODS:%=$(ROOTLIB_DEVFSADM_LINKMOD)/%)
+
+ROOTUSRSBIN_COMPAT_LINKS = $(COMPAT_LINKS:%=$(ROOTUSRSBIN)/%)
+
+ROOTUSRSBIN_DEVFSADM = $(DEVFSADM_MOD:%=$(ROOTUSRSBIN)/%)
+
+ROOTLIB_DEVFSADM_DAEMON = $(ROOTLIB_DEVFSADM)/$(DEVFSADM_DAEMON)
+
+ROOTETC_DEVLINKTAB = $(DEVLINKTAB:%=$(ROOTETC)/%)
+
+FILEMODE= 755
+
+$(ROOTETC_DEVLINKTAB) := FILEMODE = 644
+
+all := TARGET= all
+install := TARGET= install
+clean := TARGET= clean
+clobber := TARGET= clobber
+lint := TARGET= lint
+
+
+.KEEP_STATE:
+
+all: $(MODS) $(DEVLINKTAB)
+
+install: all \
+ $(ROOTLIB_DEVFSADM) \
+ $(ROOTLIB_DEVFSADM_LINKMOD) \
+ $(ROOTUSRSBIN_DEVFSADM) \
+ $(ROOTETC_DEVLINKTAB) \
+ $(ROOTLIB_DEVFSADM_LINK_MODS) \
+ $(ROOTUSRINCLUDE) \
+ $(ROOTLIB_DEVFSADM_DAEMON) \
+ $(ROOTUSRSBIN_COMPAT_LINKS)
+
+
+clean:
+ $(RM) $(OBJS)
+
+
+lint: $(DEVFSADM_MOD).ln $(LINT_MODULES)
+
+devfsadm.ln: $(DEVFSADM_SRC)
+ $(LINT.c) $(DEVFSADM_SRC) $(LDLIBS)
+
+%.ln: $(DEVFSADM_SRC) %.c
+ $(LINT.c) $(DEVFSADM_SRC) $(@:.ln=.c) $(LDLIBS)
+
+include ../../Makefile.targ
+
+$(POFILE): $(POFILES)
+ $(RM) $@; cat $(POFILES) > $@
+
+$(DEVFSADM_MOD): $(DEVFSADM_OBJ)
+ $(LINK.c) -o $@ $< $(DEVFSADM_OBJ) $(LDLIBS)
+ $(POST_PROCESS)
+
+SUNW_%.so: %.o $(MAPFILES)
+ $(CC) -o $@ $(GSHARED) $(DYNFLAGS) -h $@ $< $(LDLIBS) -lc
+ $(POST_PROCESS_SO)
+
+%.o: $(COMMON)/%.c
+ $(COMPILE.c) -o $@ $< $(CTFCONVERT_HOOK)
+ $(POST_PROCESS_O)
+
+
+$(DEVLINKTAB): $(DEVLINKTAB_SRC)
+ $(RM) $(DEVLINKTAB)
+ /bin/sh $(DEVLINKTAB_SRC) > $(DEVLINKTAB)
+
+$(ROOTUSRSBIN):
+ $(INS.dir)
+
+$(ROOTLIB_DEVFSADM):
+ $(INS.dir)
+
+$(ROOTUSRINCLUDE):
+ $(INS.dir)
+
+$(ROOTLIB_DEVFSADM_LINKMOD):
+ $(INS.dir)
+
+$(ROOTLIB_DEVFSADM_LINKMOD)/%: %
+ $(INS.file)
+
+$(ROOTLIB_DEVFSADM_DAEMON):
+ $(RM) $@; $(SYMLINK) ../../sbin/$(DEVFSADM_DIR) $@
+
+$(ROOTUSRSBIN_COMPAT_LINKS): $(ROOTUSRSBIN_DEVFSADM)
+ $(RM) $@ ; $(LN) $(ROOTUSRSBIN_DEVFSADM) $@
+
+#
+# Source shared with add_drv/update_drv
+#
+../plcysubr.c:
+ rm -f $@
+ ln -s ../modload/plcysubr.c ..
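Expanded by hand for one common module, the compile and link pattern rules above reduce to approximately the following commands. This is a sketch only: the real flags are inherited from Makefile.master and Makefile.lib, and -Kpic and -G are the Sun compiler's PIC and shared-object flags.

    # compile one link module PIC, as the %.o rule would (illustrative flags)
    cc -c -Kpic -D_POSIX_PTHREAD_SEMANTICS -D_REENTRANT \
        -I.. -I../../../uts/common -o misc_link.o ../misc_link.c
    # link it into a loadable module, as the SUNW_%.so rule would
    cc -o SUNW_misc_link.so -G -h SUNW_misc_link.so misc_link.o -ldevinfo -lc

Each module names its own SONAME with -h (hence the nulled HSONAME above), and devfsadm dlopens the resulting SUNW_*.so files from its linkmod directory at run time.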
diff --git a/usr/src/cmd/devfsadm/devfsadm.h b/usr/src/cmd/devfsadm/devfsadm.h
new file mode 100644
index 0000000..3d801f6
--- /dev/null
+++ b/usr/src/cmd/devfsadm/devfsadm.h
@@ -0,0 +1,255 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _DEVFSADM_H
+#define _DEVFSADM_H
+
+#include <sys/types.h>
+#include <libdevinfo.h>
+#include <sys/devinfo_impl.h>
+#include <regex.h>
+
+#undef DEBUG
+#ifndef DEBUG
+#define NDEBUG 1
+#else
+#undef NDEBUG
+#endif
+
+#include <assert.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DEVFSADM_SUCCESS 0
+#define DEVFSADM_FAILURE -1
+#define DEVFSADM_MULTIPLE -2
+#define DEVFSADM_TRUE 0
+#define DEVFSADM_FALSE -1
+
+#define ILEVEL_0 0
+#define ILEVEL_1 1
+#define ILEVEL_2 2
+#define ILEVEL_3 3
+#define ILEVEL_4 4
+#define ILEVEL_5 5
+#define ILEVEL_6 6
+#define ILEVEL_7 7
+#define ILEVEL_8 8
+#define ILEVEL_9 9
+
+#define DEVFSADM_V0 0
+#define DEVFSADM_V1 1
+
+#define DEVFSADM_CONTINUE 0
+#define DEVFSADM_TERMINATE 1
+
+#define INTEGER 0
+#define CHARACTER 1
+
+#define RM_HOT 0x01
+#define RM_PRE 0x02
+#define RM_POST 0x04
+#define RM_ALWAYS 0x08
+#define RM_NOINTERPOSE 0x10
+
+#define TYPE_EXACT 0x01
+#define TYPE_RE 0x02
+#define TYPE_PARTIAL 0x04
+#define TYPE_MASK 0x07
+#define DRV_EXACT 0x10
+#define DRV_RE 0x20
+#define DRV_MASK 0x30
+#define CREATE_DEFER 0x100
+#define CREATE_MASK 0x100
+
+/* command to start daemon */
+#define DEVFSADMD_START_PATH "/usr/lib/devfsadm/devfsadmd"
+#define DEVFSADMD_START "devfsadmd"
+
+/* devfsadm event service door */
+#define DEVFSADM_SERVICE_DOOR "/etc/sysevent/devfsadm_event_channel"
+#define DEVNAME_LOOKUP_DOOR ".devname_lookup_door"
+
+/* File of reserved devnames */
+#define ENUMERATE_RESERVED "/etc/dev/reserved_devnames"
+
+/* flags for devfsadm_mklink */
+#define DEV_SYNC 0x02 /* synchronous mklink */
+
+#define INFO_MID NULL /* always prints */
+#define VERBOSE_MID "verbose" /* prints with -v */
+#define CHATTY_MID "chatty" /* prints with -V chatty */
+
+typedef struct devfsadm_create {
+ char *device_class; /* eg "disk", "tape", "display" */
+ char *node_type; /* eg DDI_NT_TAPE, DDI_NT_BLOCK, etc */
+ char *drv_name; /* eg sd, ssd */
+ int flags; /* TYPE_{EXACT,RE,PARTIAL}, DRV_{EXACT,RE} */
+ int interpose_lvl; /* eg ILEVEL_0.. ILEVEL_10 */
+ int (*callback_fcn)(di_minor_t minor, di_node_t node);
+} devfsadm_create_t;
+
+typedef struct devfsadm_remove {
+ char *device_class; /* eg "disk", "tape", "display" */
+ char *dev_dirs_re; /* dev dirs regex selector */
+ int flags; /* eg POST, PRE, HOT, ALWAYS */
+ int interpose_lvl; /* eg ILEVEL_0 .. ILEVEL_10 */
+ void (*callback_fcn)(char *);
+} devfsadm_remove_t;
+
+typedef struct devfsadm_remove_V1 {
+ char *device_class; /* eg "disk", "tape", "display" */
+ char *dev_dirs_re; /* dev dirs regex selector */
+ int flags; /* eg POST, PRE, HOT, ALWAYS */
+ int interpose_lvl; /* eg ILEVEL_0 .. ILEVEL_10 */
+ int (*callback_fcn)(char *);
+} devfsadm_remove_V1_t;
+
+typedef struct _devfsadm_create_reg {
+ uint_t version;
+ uint_t count; /* number of node type registration */
+ /* structures */
+ devfsadm_create_t *tblp;
+} _devfsadm_create_reg_t;
+
+typedef struct _devfsadm_remove_reg {
+ uint_t version;
+ uint_t count; /* number of node type registration */
+ /* structures */
+ devfsadm_remove_t *tblp;
+} _devfsadm_remove_reg_t;
+
+typedef struct _devfsadm_remove_reg_V1 {
+ uint_t version;
+ uint_t count; /* number of node type registration */
+ /* structures */
+ devfsadm_remove_V1_t *tblp;
+} _devfsadm_remove_reg_V1_t;
+/*
+ * "flags" in the devfs_enumerate structure can take the following values.
+ * These values specify the substring of devfs path to be used for
+ * enumeration. Components (see MATCH_ADDR/MATCH_MINOR) may be specified
+ * by using the "match_arg" member in the devfsadm_enumerate structure.
+ */
+#define MATCH_ALL 0x001 /* Match entire devfs path */
+#define MATCH_PARENT 0x002 /* Match up to last '/' in devfs path */
+#define MATCH_ADDR 0x004 /* Match up to nth component of last address */
+#define MATCH_MINOR 0x008 /* Match up to nth component of minor name */
+#define MATCH_CALLBACK 0x010 /* Use callback to derive match string */
+
+/*
+ * The following flags are private to devfsadm and the disks module.
+ * NOT to be used by other modules.
+ */
+#define MATCH_NODE 0x020
+#define MATCH_MASK 0x03F
+#define MATCH_UNCACHED 0x040 /* retry flags for disks module */
+
+typedef struct devfsadm_enumerate {
+ char *re;
+ int subexp;
+ uint_t flags;
+ char *match_arg;
+ char *(*sel_fcn)(const char *path, void *cb_arg);
+ void *cb_arg;
+} devfsadm_enumerate_t;
+
+#define DEVFSADM_CREATE_INIT_V0(tbl) \
+ _devfsadm_create_reg_t _devfsadm_create_reg = { \
+ DEVFSADM_V0, \
+ (sizeof (tbl) / sizeof (devfsadm_create_t)), \
+ ((devfsadm_create_t *)(tbl)) }
+
+#define DEVFSADM_REMOVE_INIT_V0(tbl)\
+ _devfsadm_remove_reg_t _devfsadm_remove_reg = {\
+ DEVFSADM_V0, \
+ (sizeof (tbl) / sizeof (devfsadm_remove_t)), \
+ ((devfsadm_remove_t *)(tbl)) }
+
+#define DEVFSADM_REMOVE_INIT_V1(tbl)\
+ _devfsadm_remove_reg_V1_t _devfsadm_remove_reg = {\
+ DEVFSADM_V1, \
+ (sizeof (tbl) / sizeof (devfsadm_remove_V1_t)), \
+ ((devfsadm_remove_V1_t *)(tbl)) }
+
+/* reserved devname support */
+typedef struct devlink_re {
+ char *d_re;
+ int d_subexp;
+ regex_t d_rcomp;
+ regmatch_t *d_pmatch;
+} devlink_re_t;
+
+typedef struct enumerate_file {
+ char *er_file;
+ char *er_id;
+ struct enumerate_file *er_next;
+} enumerate_file_t;
+
+int devfsadm_noupdate(void);
+const char *devfsadm_root_path(void);
+int devfsadm_link_valid(di_node_t anynode, char *link);
+int devfsadm_mklink(char *link, di_node_t node, di_minor_t minor, int flags);
+int devfsadm_secondary_link(char *link, char *primary_link, int flags);
+void devfsadm_rm_link(char *file);
+void devfsadm_rm_all(char *file);
+void devfsadm_rm_stale_links(char *dir_re, char *valid_link, di_node_t node,
+ di_minor_t minor);
+void devfsadm_errprint(char *message, ...);
+void devfsadm_print(char *mid, char *message, ...);
+int devfsadm_enumerate_int(char *devfs_path, int index, char **buf,
+ devfsadm_enumerate_t rules[], int nrules);
+int devfsadm_enumerate_char(char *devfs_path, int index, char **buf,
+ devfsadm_enumerate_t rules[], int nrules);
+char **devfsadm_lookup_dev_names(char *phys_path, char *re, int *lenp);
+void devfsadm_free_dev_names(char **dev_names, int len);
+
+/* devlink cache related */
+di_devlink_handle_t devfsadm_devlink_cache(void);
+
+/*
+ * Private enumerate interface for disks and sgen modules
+ */
+int disk_enumerate_int(char *devfs_path, int index, char **buf,
+ devfsadm_enumerate_t rules[], int nrules);
+/*
+ * Private interfaces for ports module (port_link.c).
+ */
+int devfsadm_enumerate_char_start(char *devfs_path, int index,
+ char **buf, devfsadm_enumerate_t rules[], int nrules, char *start);
+int devfsadm_read_link(di_node_t node, char *link, char **devfs_path);
+char *s_strdup(const char *ptr);
+
+/* Private interface between reserve subsystem and disks link generator */
+int devfsadm_have_reserved(void);
+int devfsadm_is_reserved(devlink_re_t re_array[], char *devlink);
+int devfsadm_reserve_id_cache(devlink_re_t re_array[], enumerate_file_t *head);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DEVFSADM_H */
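Taken together, these interfaces define a devfsadm link-generator module: a module exports a table of devfsadm_create_t entries through DEVFSADM_CREATE_INIT_V0, and each callback creates /dev links with devfsadm_mklink(). A minimal sketch of such a module follows; the device class, node type, driver name, and link name are hypothetical, and the module would be built into a SUNW_*.so by the Makefile.com rules above. The next file in this import, misc_link_i386.c, is a full production example of the same pattern.

    #include <devfsadm.h>

    static int example_link(di_minor_t minor, di_node_t node);

    /* one entry: match ddi_pseudo minors owned by the "exdrv" driver */
    static devfsadm_create_t example_cbt[] = {
    	{ "example", "ddi_pseudo", "exdrv",
    	    TYPE_EXACT | DRV_EXACT, ILEVEL_0, example_link
    	},
    };

    DEVFSADM_CREATE_INIT_V0(example_cbt);

    /*
     * Create /dev/example0 for the matched minor node, then let
     * devfsadm continue running other modules' callbacks.
     */
    static int
    example_link(di_minor_t minor, di_node_t node)
    {
    	(void) devfsadm_mklink("example0", node, minor, 0);
    	return (DEVFSADM_CONTINUE);
    }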
diff --git a/usr/src/cmd/devfsadm/i386/Makefile b/usr/src/cmd/devfsadm/i386/Makefile
new file mode 100644
index 0000000..1f14c93
--- /dev/null
+++ b/usr/src/cmd/devfsadm/i386/Makefile
@@ -0,0 +1,32 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+#
+
+LINK_OBJS_i386 = \
+ misc_link_i386.o \
+ xen_link.o
+
+xen_link.o xen_link.ln xen_link.po := CPPFLAGS += -I$(UTSBASE)/i86xpv
+
+include ../Makefile.com
+
diff --git a/usr/src/cmd/devfsadm/i386/misc_link_i386.c b/usr/src/cmd/devfsadm/i386/misc_link_i386.c
new file mode 100644
index 0000000..5d2e18a
--- /dev/null
+++ b/usr/src/cmd/devfsadm/i386/misc_link_i386.c
@@ -0,0 +1,656 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ * Copyright 2012 Joyent, Inc. All rights reserved.
+ */
+
+#include <regex.h>
+#include <devfsadm.h>
+#include <stdio.h>
+#include <strings.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <ctype.h>
+#include <sys/mc_amd.h>
+#include <bsm/devalloc.h>
+
+extern int system_labeled;
+
+static int lp(di_minor_t minor, di_node_t node);
+static int serial_dialout(di_minor_t minor, di_node_t node);
+static int serial(di_minor_t minor, di_node_t node);
+static int diskette(di_minor_t minor, di_node_t node);
+static int vt00(di_minor_t minor, di_node_t node);
+static int kdmouse(di_minor_t minor, di_node_t node);
+static int ipmi(di_minor_t minor, di_node_t node);
+static int smbios(di_minor_t minor, di_node_t node);
+static int agp_process(di_minor_t minor, di_node_t node);
+static int drm_node(di_minor_t minor, di_node_t node);
+static int mc_node(di_minor_t minor, di_node_t node);
+static int xsvc(di_minor_t minor, di_node_t node);
+static int srn(di_minor_t minor, di_node_t node);
+static int ucode(di_minor_t minor, di_node_t node);
+static int heci(di_minor_t minor, di_node_t node);
+
+
+static devfsadm_create_t misc_cbt[] = {
+ { "vt00", "ddi_display", NULL,
+ TYPE_EXACT, ILEVEL_0, vt00
+ },
+ { "drm", "ddi_display:drm", NULL,
+ TYPE_EXACT, ILEVEL_0, drm_node
+ },
+ { "mouse", "ddi_mouse", "mouse8042",
+ TYPE_EXACT | DRV_EXACT, ILEVEL_0, kdmouse
+ },
+ { "pseudo", "ddi_pseudo", "ipmi",
+ TYPE_EXACT | DRV_EXACT, ILEVEL_0, ipmi,
+ },
+ { "pseudo", "ddi_pseudo", "smbios",
+ TYPE_EXACT | DRV_EXACT, ILEVEL_1, smbios,
+ },
+ /* floppies share the same class, but not link regex, as hard disks */
+ { "disk", "ddi_block:diskette", NULL,
+ TYPE_EXACT, ILEVEL_1, diskette
+ },
+ { "parallel", "ddi_printer", NULL,
+ TYPE_EXACT, ILEVEL_1, lp
+ },
+ { "serial", "ddi_serial:mb", NULL,
+ TYPE_EXACT, ILEVEL_1, serial
+ },
+ { "serial", "ddi_serial:dialout,mb", NULL,
+ TYPE_EXACT, ILEVEL_1, serial_dialout
+ },
+ { "agp", "ddi_agp:pseudo", NULL,
+ TYPE_EXACT, ILEVEL_0, agp_process
+ },
+ { "agp", "ddi_agp:target", NULL,
+ TYPE_EXACT, ILEVEL_0, agp_process
+ },
+ { "agp", "ddi_agp:cpugart", NULL,
+ TYPE_EXACT, ILEVEL_0, agp_process
+ },
+ { "agp", "ddi_agp:master", NULL,
+ TYPE_EXACT, ILEVEL_0, agp_process
+ },
+ { "pseudo", "ddi_pseudo", NULL,
+ TYPE_EXACT, ILEVEL_0, xsvc
+ },
+ { "pseudo", "ddi_pseudo", NULL,
+ TYPE_EXACT, ILEVEL_0, srn
+ },
+ { "memory-controller", "ddi_mem_ctrl", NULL,
+ TYPE_EXACT, ILEVEL_0, mc_node
+ },
+ { "pseudo", "ddi_pseudo", "ucode",
+ TYPE_EXACT | DRV_EXACT, ILEVEL_0, ucode,
+ },
+ { "pseudo", "ddi_pseudo", "heci",
+ TYPE_EXACT | DRV_EXACT, ILEVEL_0, heci,
+ }
+};
+
+DEVFSADM_CREATE_INIT_V0(misc_cbt);
+
+static char *debug_mid = "misc_mid";
+
+typedef enum {
+ DRIVER_AGPPSEUDO = 0,
+ DRIVER_AGPTARGET,
+ DRIVER_CPUGART,
+ DRIVER_AGPMASTER_DRM_I915,
+ DRIVER_AGPMASTER_DRM_RADEON,
+ DRIVER_AGPMASTER_VGATEXT,
+ DRIVER_UNKNOWN
+} driver_defs_t;
+
+typedef struct {
+ char *driver_name;
+ int index;
+} driver_name_table_entry_t;
+
+static driver_name_table_entry_t driver_name_table[] = {
+ { "agpgart", DRIVER_AGPPSEUDO },
+ { "agptarget", DRIVER_AGPTARGET },
+ { "amd64_gart", DRIVER_CPUGART },
+ /* AGP master device managed by drm driver */
+ { "i915", DRIVER_AGPMASTER_DRM_I915 },
+ { "radeon", DRIVER_AGPMASTER_DRM_RADEON },
+ { "vgatext", DRIVER_AGPMASTER_VGATEXT },
+ { NULL, DRIVER_UNKNOWN }
+};
+
+static devfsadm_enumerate_t agptarget_rules[1] =
+ { "^agp$/^agptarget([0-9]+)$", 1, MATCH_ALL };
+static devfsadm_enumerate_t cpugart_rules[1] =
+ { "^agp$/^cpugart([0-9]+)$", 1, MATCH_ALL };
+static devfsadm_enumerate_t agpmaster_rules[1] =
+ { "^agp$/^agpmaster([0-9]+)$", 1, MATCH_ALL };
+
+static devfsadm_remove_t misc_remove_cbt[] = {
+ { "vt", "vt[0-9][0-9]", RM_PRE|RM_ALWAYS,
+ ILEVEL_0, devfsadm_rm_all
+ },
+ { "pseudo", "^ucode$", RM_ALWAYS | RM_PRE | RM_HOT,
+ ILEVEL_0, devfsadm_rm_all
+ },
+ { "mouse", "^kdmouse$", RM_ALWAYS | RM_PRE,
+ ILEVEL_0, devfsadm_rm_all
+ },
+ { "disk", "^(diskette|rdiskette)([0-9]*)$",
+ RM_ALWAYS | RM_PRE, ILEVEL_1, devfsadm_rm_all
+ },
+ { "parallel", "^(lp|ecpp)([0-9]+)$", RM_ALWAYS | RM_PRE,
+ ILEVEL_1, devfsadm_rm_all
+ },
+ { "serial", "^(tty|ttyd)([0-9]+)$", RM_ALWAYS | RM_PRE,
+ ILEVEL_1, devfsadm_rm_all
+ },
+ { "serial", "^tty[a-z]$", RM_ALWAYS | RM_PRE,
+ ILEVEL_1, devfsadm_rm_all
+ }
+};
+
+DEVFSADM_REMOVE_INIT_V0(misc_remove_cbt);
+
+/*
+ * Handles minor node type "ddi_display", in addition to generic processing
+ * done by display().
+ *
+ * This creates a /dev/vt00 link to /dev/fb, for backwards compatibility.
+ */
+/* ARGSUSED */
+int
+vt00(di_minor_t minor, di_node_t node)
+{
+ (void) devfsadm_secondary_link("vt00", "fb", 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+/*
+ * type=ddi_block:diskette;addr=0,0;minor=c diskette
+ * type=ddi_block:diskette;addr=0,0;minor=c,raw rdiskette
+ * type=ddi_block:diskette;addr1=0;minor=c diskette\A2
+ * type=ddi_block:diskette;addr1=0;minor=c,raw rdiskette\A2
+ */
+static int
+diskette(di_minor_t minor, di_node_t node)
+{
+ int flags = 0;
+ char *a2;
+ char link[PATH_MAX];
+ char *addr = di_bus_addr(node);
+ char *mn = di_minor_name(minor);
+
+ if (system_labeled)
+ flags = DA_ADD|DA_FLOPPY;
+
+ if (strcmp(addr, "0,0") == 0) {
+ if (strcmp(mn, "c") == 0) {
+ (void) devfsadm_mklink("diskette", node, minor, flags);
+ } else if (strcmp(mn, "c,raw") == 0) {
+ (void) devfsadm_mklink("rdiskette", node, minor, flags);
+ }
+
+ }
+
+ if (addr[0] == '0') {
+ if ((a2 = strchr(addr, ',')) != NULL) {
+ a2++;
+ if (strcmp(mn, "c") == 0) {
+ (void) strcpy(link, "diskette");
+ (void) strcat(link, a2);
+ (void) devfsadm_mklink(link, node, minor,
+ flags);
+ } else if (strcmp(mn, "c,raw") == 0) {
+ (void) strcpy(link, "rdiskette");
+ (void) strcat(link, a2);
+ (void) devfsadm_mklink(link, node, minor,
+ flags);
+ }
+ }
+ }
+
+ return (DEVFSADM_CONTINUE);
+}
+
+/*
+ * type=ddi_printer;name=lp;addr=1,3bc lp0
+ * type=ddi_printer;name=lp;addr=1,378 lp1
+ * type=ddi_printer;name=lp;addr=1,278 lp2
+ */
+static int
+lp(di_minor_t minor, di_node_t node)
+{
+ char *addr = di_bus_addr(node);
+ char *buf;
+ char path[PATH_MAX + 1];
+ devfsadm_enumerate_t rules[1] = {"^ecpp([0-9]+)$", 1, MATCH_ALL};
+
+ if (strcmp(addr, "1,3bc") == 0) {
+ (void) devfsadm_mklink("lp0", node, minor, 0);
+
+ } else if (strcmp(addr, "1,378") == 0) {
+ (void) devfsadm_mklink("lp1", node, minor, 0);
+
+ } else if (strcmp(addr, "1,278") == 0) {
+ (void) devfsadm_mklink("lp2", node, minor, 0);
+ }
+
+ if (strcmp(di_driver_name(node), "ecpp") != 0) {
+ return (DEVFSADM_CONTINUE);
+ }
+
+ if ((buf = di_devfs_path(node)) == NULL) {
+ return (DEVFSADM_CONTINUE);
+ }
+
+ (void) snprintf(path, sizeof (path), "%s:%s",
+ buf, di_minor_name(minor));
+
+ di_devfs_path_free(buf);
+
+ if (devfsadm_enumerate_int(path, 0, &buf, rules, 1)) {
+ return (DEVFSADM_CONTINUE);
+ }
+
+ (void) snprintf(path, sizeof (path), "ecpp%s", buf);
+ free(buf);
+ (void) devfsadm_mklink(path, node, minor, 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+/*
+ * type=ddi_serial:mb;minor=a tty00
+ * type=ddi_serial:mb;minor=b tty01
+ * type=ddi_serial:mb;minor=c tty02
+ * type=ddi_serial:mb;minor=d tty03
+ */
+static int
+serial(di_minor_t minor, di_node_t node)
+{
+
+ char *mn = di_minor_name(minor);
+ char link[PATH_MAX];
+
+ (void) strcpy(link, "tty");
+ (void) strcat(link, mn);
+ (void) devfsadm_mklink(link, node, minor, 0);
+
+ if (strcmp(mn, "a") == 0) {
+ (void) devfsadm_mklink("tty00", node, minor, 0);
+
+ } else if (strcmp(mn, "b") == 0) {
+ (void) devfsadm_mklink("tty01", node, minor, 0);
+
+ } else if (strcmp(mn, "c") == 0) {
+ (void) devfsadm_mklink("tty02", node, minor, 0);
+
+ } else if (strcmp(mn, "d") == 0) {
+ (void) devfsadm_mklink("tty03", node, minor, 0);
+ }
+ return (DEVFSADM_CONTINUE);
+}
+
+/*
+ * type=ddi_serial:dialout,mb;minor=a,cu ttyd0
+ * type=ddi_serial:dialout,mb;minor=b,cu ttyd1
+ * type=ddi_serial:dialout,mb;minor=c,cu ttyd2
+ * type=ddi_serial:dialout,mb;minor=d,cu ttyd3
+ */
+static int
+serial_dialout(di_minor_t minor, di_node_t node)
+{
+ char *mn = di_minor_name(minor);
+
+ if (strcmp(mn, "a,cu") == 0) {
+ (void) devfsadm_mklink("ttyd0", node, minor, 0);
+ (void) devfsadm_mklink("cua0", node, minor, 0);
+
+ } else if (strcmp(mn, "b,cu") == 0) {
+ (void) devfsadm_mklink("ttyd1", node, minor, 0);
+ (void) devfsadm_mklink("cua1", node, minor, 0);
+
+ } else if (strcmp(mn, "c,cu") == 0) {
+ (void) devfsadm_mklink("ttyd2", node, minor, 0);
+ (void) devfsadm_mklink("cua2", node, minor, 0);
+
+ } else if (strcmp(mn, "d,cu") == 0) {
+ (void) devfsadm_mklink("ttyd3", node, minor, 0);
+ (void) devfsadm_mklink("cua3", node, minor, 0);
+ }
+ return (DEVFSADM_CONTINUE);
+}
+
+static int
+kdmouse(di_minor_t minor, di_node_t node)
+{
+ (void) devfsadm_mklink("kdmouse", node, minor, 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+static int
+ipmi(di_minor_t minor, di_node_t node)
+{
+ /*
+ * Follow convention from other systems, and include an instance#,
+ * even though there will only be one.
+ */
+ (void) devfsadm_mklink("ipmi0", node, minor, 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+static int
+smbios(di_minor_t minor, di_node_t node)
+{
+ (void) devfsadm_mklink("smbios", node, minor, 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+static int
+agp_process(di_minor_t minor, di_node_t node)
+{
+ char *minor_nm, *drv_nm;
+ char *devfspath;
+ char *I_path, *p_path, *buf;
+ char *name = (char *)NULL;
+ int i, index;
+ devfsadm_enumerate_t rules[1];
+
+ minor_nm = di_minor_name(minor);
+ drv_nm = di_driver_name(node);
+
+ if ((minor_nm == NULL) || (drv_nm == NULL)) {
+ return (DEVFSADM_CONTINUE);
+ }
+
+ devfsadm_print(debug_mid, "agp_process: minor=%s node=%s\n",
+ minor_nm, di_node_name(node));
+
+ devfspath = di_devfs_path(node);
+ if (devfspath == NULL) {
+ devfsadm_print(debug_mid, "agp_process: devfspath is NULL\n");
+ return (DEVFSADM_CONTINUE);
+ }
+
+ I_path = (char *)malloc(PATH_MAX);
+
+ if (I_path == NULL) {
+ di_devfs_path_free(devfspath);
+ devfsadm_print(debug_mid, "agp_process: malloc failed\n");
+ return (DEVFSADM_CONTINUE);
+ }
+
+ p_path = (char *)malloc(PATH_MAX);
+
+ if (p_path == NULL) {
+ devfsadm_print(debug_mid, "agp_process: malloc failed\n");
+ di_devfs_path_free(devfspath);
+ free(I_path);
+ return (DEVFSADM_CONTINUE);
+ }
+
+ (void) strlcpy(p_path, devfspath, PATH_MAX);
+ (void) strlcat(p_path, ":", PATH_MAX);
+ (void) strlcat(p_path, minor_nm, PATH_MAX);
+ di_devfs_path_free(devfspath);
+
+ devfsadm_print(debug_mid, "agp_process: path %s\n", p_path);
+
+ for (i = 0; ; i++) {
+ if ((driver_name_table[i].driver_name == NULL) ||
+ (strcmp(drv_nm, driver_name_table[i].driver_name) == 0)) {
+ index = driver_name_table[i].index;
+ break;
+ }
+ }
+ switch (index) {
+ case DRIVER_AGPPSEUDO:
+ devfsadm_print(debug_mid,
+ "agp_process: psdeudo driver name\n");
+ name = "agpgart";
+ (void) snprintf(I_path, PATH_MAX, "%s", name);
+ devfsadm_print(debug_mid,
+ "mklink %s -> %s\n", I_path, p_path);
+
+ (void) devfsadm_mklink(I_path, node, minor, 0);
+
+ free(I_path);
+ free(p_path);
+ return (DEVFSADM_CONTINUE);
+ case DRIVER_AGPTARGET:
+ devfsadm_print(debug_mid,
+ "agp_process: target driver name\n");
+ rules[0] = agptarget_rules[0];
+ name = "agptarget";
+ break;
+ case DRIVER_CPUGART:
+ devfsadm_print(debug_mid,
+ "agp_process: cpugart driver name\n");
+ rules[0] = cpugart_rules[0];
+ name = "cpugart";
+ break;
+ case DRIVER_AGPMASTER_DRM_I915:
+ case DRIVER_AGPMASTER_DRM_RADEON:
+ case DRIVER_AGPMASTER_VGATEXT:
+ devfsadm_print(debug_mid,
+ "agp_process: agpmaster driver name\n");
+ rules[0] = agpmaster_rules[0];
+ name = "agpmaster";
+ break;
+ case DRIVER_UNKNOWN:
+ devfsadm_print(debug_mid,
+ "agp_process: unknown driver name=%s\n", drv_nm);
+ free(I_path);
+ free(p_path);
+ return (DEVFSADM_CONTINUE);
+ }
+
+ if (devfsadm_enumerate_int(p_path, 0, &buf, rules, 1)) {
+ devfsadm_print(debug_mid, "agp_process: exit/coninue\n");
+ free(I_path);
+ free(p_path);
+ return (DEVFSADM_CONTINUE);
+ }
+
+
+ (void) snprintf(I_path, PATH_MAX, "agp/%s%s", name, buf);
+
+ devfsadm_print(debug_mid, "agp_process: p_path=%s buf=%s\n",
+ p_path, buf);
+
+ free(buf);
+
+ devfsadm_print(debug_mid, "mklink %s -> %s\n", I_path, p_path);
+
+ (void) devfsadm_mklink(I_path, node, minor, 0);
+
+ free(p_path);
+ free(I_path);
+
+ return (DEVFSADM_CONTINUE);
+}
+
+static int
+drm_node(di_minor_t minor, di_node_t node)
+{
+ char *minor_nm, *drv_nm;
+ char *devfspath;
+ char *I_path, *p_path, *buf;
+ char *name = "card";
+
+ devfsadm_enumerate_t drm_rules[1] = {"^dri$/^card([0-9]+)$", 1,
+ MATCH_ALL };
+
+
+ minor_nm = di_minor_name(minor);
+ drv_nm = di_driver_name(node);
+ if ((minor_nm == NULL) || (drv_nm == NULL)) {
+ return (DEVFSADM_CONTINUE);
+ }
+
+ devfsadm_print(debug_mid, "drm_node: minor=%s node=%s type=%s\n",
+ minor_nm, di_node_name(node), di_minor_nodetype(minor));
+
+ devfspath = di_devfs_path(node);
+ if (devfspath == NULL) {
+ devfsadm_print(debug_mid, "drm_node: devfspath is NULL\n");
+ return (DEVFSADM_CONTINUE);
+ }
+
+ I_path = (char *)malloc(PATH_MAX);
+
+ if (I_path == NULL) {
+ di_devfs_path_free(devfspath);
+ devfsadm_print(debug_mid, "drm_node: malloc failed\n");
+ return (DEVFSADM_CONTINUE);
+ }
+
+ p_path = (char *)malloc(PATH_MAX);
+
+ if (p_path == NULL) {
+ devfsadm_print(debug_mid, "drm_node: malloc failed\n");
+ di_devfs_path_free(devfspath);
+ free(I_path);
+ return (DEVFSADM_CONTINUE);
+ }
+
+ (void) strlcpy(p_path, devfspath, PATH_MAX);
+ (void) strlcat(p_path, ":", PATH_MAX);
+ (void) strlcat(p_path, minor_nm, PATH_MAX);
+ di_devfs_path_free(devfspath);
+
+ devfsadm_print(debug_mid, "drm_node: p_path %s\n", p_path);
+
+ if (devfsadm_enumerate_int(p_path, 0, &buf, drm_rules, 1)) {
+ free(p_path);
+ devfsadm_print(debug_mid, "drm_node: exit/coninue\n");
+ return (DEVFSADM_CONTINUE);
+ }
+ (void) snprintf(I_path, PATH_MAX, "dri/%s%s", name, buf);
+
+ devfsadm_print(debug_mid, "drm_node: p_path=%s buf=%s\n",
+ p_path, buf);
+
+ free(buf);
+
+ devfsadm_print(debug_mid, "mklink %s -> %s\n", I_path, p_path);
+ (void) devfsadm_mklink(I_path, node, minor, 0);
+
+ free(p_path);
+ free(I_path);
+
+ return (0);
+}
+
+/*
+ * /dev/mc/mc<chipid> -> /devices/.../pci1022,1102@<chipid+24>,2:mc-amd
+ */
+static int
+mc_node(di_minor_t minor, di_node_t node)
+{
+ const char *minorname = di_minor_name(minor);
+ const char *busaddr = di_bus_addr(node);
+ char linkpath[PATH_MAX];
+ int unitaddr;
+ char *c;
+
+ if (minorname == NULL || busaddr == NULL)
+ return (DEVFSADM_CONTINUE);
+
+ errno = 0;
+ unitaddr = strtol(busaddr, &c, 16);
+
+ if (errno != 0)
+ return (DEVFSADM_CONTINUE);
+
+ if (unitaddr == 0) {
+ (void) snprintf(linkpath, sizeof (linkpath), "mc/mc");
+ } else if (unitaddr >= MC_AMD_DEV_OFFSET) {
+ (void) snprintf(linkpath, sizeof (linkpath), "mc/mc%u",
+ unitaddr - MC_AMD_DEV_OFFSET);
+ } else {
+ (void) snprintf(linkpath, sizeof (linkpath), "mc/mc%u",
+ minor->dev_minor);
+ }
+ (void) devfsadm_mklink(linkpath, node, minor, 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+/*
+ * Creates \M0 devlink for xsvc node
+ */
+static int
+xsvc(di_minor_t minor, di_node_t node)
+{
+ char *mn;
+
+ if (strcmp(di_node_name(node), "xsvc") != 0)
+ return (DEVFSADM_CONTINUE);
+
+ mn = di_minor_name(minor);
+ if (mn == NULL)
+ return (DEVFSADM_CONTINUE);
+
+ (void) devfsadm_mklink(mn, node, minor, 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+/*
+ * Creates \M0 devlink for srn device
+ */
+static int
+srn(di_minor_t minor, di_node_t node)
+{
+ char *mn;
+
+ if (strcmp(di_node_name(node), "srn") != 0)
+ return (DEVFSADM_CONTINUE);
+
+ mn = di_minor_name(minor);
+ if (mn == NULL)
+ return (DEVFSADM_CONTINUE);
+
+ (void) devfsadm_mklink(mn, node, minor, 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+/*
+ * /dev/ucode -> /devices/pseudo/ucode@0:ucode
+ */
+static int
+ucode(di_minor_t minor, di_node_t node)
+{
+ (void) devfsadm_mklink("ucode", node, minor, 0);
+ return (DEVFSADM_CONTINUE);
+}
+
+static int
+heci(di_minor_t minor, di_node_t node)
+{
+ if (strcmp(di_minor_name(minor), "AMT") == 0) {
+ (void) devfsadm_mklink("heci", node, minor, 0);
+ }
+ return (DEVFSADM_CONTINUE);
+}
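For reference, the link generators above produce /dev names of the following shape (the instance digits come from the enumeration rules, so the exact numbers vary by system): /dev/agpgart for the agpgart pseudo driver; /dev/agp/agptarget0, /dev/agp/cpugart0, and /dev/agp/agpmaster0 for the target, CPU GART, and master devices handled by agp_process(); and /dev/dri/card0 for the ddi_display:drm minors handled by drm_node(). The man page that follows documents the ioctl interface behind /dev/agpgart.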
diff --git a/usr/src/man/man7i/agpgart_io.7i b/usr/src/man/man7i/agpgart_io.7i
new file mode 100644
index 0000000..e0470bb
--- /dev/null
+++ b/usr/src/man/man7i/agpgart_io.7i
@@ -0,0 +1,926 @@
+'\" te
+.\" Copyright (c) 2008, Sun Microsystems, Inc. All Rights Reserved
+.\" The contents of this file are subject to the terms of the Common Development and Distribution License (the "License"). You may not use this file except in compliance with the License.
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing. See the License for the specific language governing permissions and limitations under the License.
+.\" When distributing Covered Code, include this CDDL HEADER in each file and include the License file at usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your own identifying information: Portions Copyright [yyyy] [name of copyright owner]
+.TH AGPGART_IO 7I "Sep 10, 2013"
+.SH NAME
+agpgart_io \- Solaris agpgart driver I/O control operations
+.SH SYNOPSIS
+.LP
+.nf
+#include <sys/agpgart.h>
+.fi
+
+.SH DESCRIPTION
+.sp
+.LP
+The Accelerated Graphics Port (AGP) is a PCI bus technology enhancement that
+improves 3D graphics performance by using low-cost system memory. AGP chipsets
+use the Graphics Address Remapping Table (GART) to map discontiguous system
+memory into a contiguous PCI memory range (known as the AGP Aperture), enabling
+the graphics card to utilize the mapped aperture range as video memory.
+.sp
+.LP
+The \fBagpgart\fR driver creates a pseudo device node at \fB/dev/agpgart\fR and
+provides a set of ioctls for managing allocation/deallocation of system
+memory, setting mappings between system memory and aperture range, and setting
+up AGP devices. The \fBagpgart\fR driver manages both pseudo and real device
+nodes, but to initiate AGP-related operations you operate only on the
+\fB/dev/agpgart\fR pseudo device node. To do this, open \fB/dev/agpgart\fR. The
+macro defined for the pseudo device node name is:
+.sp
+.in +2
+.nf
+#define AGP_DEVICE "/dev/agpgart"
+.fi
+.in -2
+
+.sp
+.LP
+The \fBagpgart_io\fR driver implementation is AGP architecture-dependent and
+cannot be made generic. Currently, the \fBagpgart_io\fR driver only supports
+specific AGP systems. To determine if a system is supported, run an
+\fBopen\fR(2) system call on the AGP_DEVICE node. (Note that \fBopen\fR(2)
+fails if a system is not supported). After the AGP_DEVICE is opened, you can
+use \fBkstat\fR(1M) to read the system architecture type.
+.sp
+.LP
+In addition to AGP system support, the \fBagpgart\fR ioctls can also be used on
+Intel integrated graphics devices (IGD). IGD devices usually have no dedicated
+video memory and must use system memory as video memory. IGD devices contain
+translation tables (referred to as \fBGTT\fR tables) that are similar to the
+GART translation table for address mapping purposes.
+.sp
+.LP
+A process must open the \fBagpgart_io\fR driver with the GRAPHICS_ACCESS
+privilege; all of the ioctls can then be called by that process using the
+saved file descriptor. With the exception of AGPIOC_INFO, the AGPIOC_ACQUIRE
+ioctl must be called before any other ioctl. Once a process has acquired
+GART, it cannot be acquired by another process until the former process calls
+AGPIOC_RELEASE.
+.sp
+.LP
+If the AGP_DEVICE fails to open, it may be due to one of the following reasons:
+.sp
+.ne 2
+.na
+\fBEAGAIN\fR
+.ad
+.sp .6
+.RS 4n
+GART table allocation failed.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEIO\fR
+.ad
+.sp .6
+.RS 4n
+Internal hardware initialization failed.
+.RE
+
+.sp
+.ne 2
+.na
+\fBENXIO\fR
+.ad
+.sp .6
+.RS 4n
+Error getting device soft state. (This is unlikely to happen.)
+.RE
+
+.sp
+.ne 2
+.na
+\fBEPERM\fR
+.ad
+.sp .6
+.RS 4n
+Insufficient privilege.
+.RE
+
+.SH IOCTLS
+.sp
+.LP
+With the exception of AGPIOC_INFO, all ioctls shown in this section are
+protected by the GRAPHICS_ACCESS privilege. (Only processes with
+GRAPHICS_ACCESS privilege in their effective set can access the privileged
+ioctls.)
+.sp
+.LP
+Common ioctl error codes are shown below. (Additional error codes may be
+displayed by individual ioctls.)
+.sp
+.ne 2
+.na
+\fBENXIO\fR
+.ad
+.sp .6
+.RS 4n
+Ioctl command not supported, or error getting device soft state.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEPERM\fR
+.ad
+.sp .6
+.RS 4n
+Process not privileged.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBAGPIOC_INFO\fR\fR
+.ad
+.sp .6
+.RS 4n
+Get system wide AGP or IGD hardware information. This command can be called by
+any process from user or kernel context.
+.sp
+.in +2
+.nf
+The argument is a pointer to agp_info_t structure.
+
+ typedef struct _agp_info {
+ agp_version_t agpi_version; /* OUT: AGP version supported */
+ uint32_t agpi_devid; /* OUT: bridge vendor + device */
+ uint32_t agpi_mode; /* OUT: mode of bridge */
+ ulong_t agpi_aperbase; /* OUT: base of aperture */
+ size_t agpi_apersize; /* OUT: aperture size in MB */
+ uint32_t agpi_pgtotal; /* OUT: max aperture pages avail. */
+ uint32_t agpi_pgsystem; /* OUT: same as pg_total */
+ uint32_t agpi_pgused; /* OUT: no. of currently used pages */
+ } agp_info_t;
+
+agpi_version The version of AGP protocol the bridge device is
+ compatible with, for example, major 3 and minor 0
+ means AGP version 3.0.
+
+ typedef struct _agp_version {
+ uint16_t agpv_major;
+ uint16_t agpv_minor;
+ } agp_version_t;
+
+agpi_devid AGP bridge vendor and device ID.
+agpi_mode Current AGP mode, read from AGP status register of
+ target device. The main bits are defined as below.
+ /* AGP status register bits definition */
+
+ #define AGPSTAT_RQ_MASK 0xff000000
+ #define AGPSTAT_SBA (0x1 << 9)
+ #define AGPSTAT_OVER4G (0x1 << 5)
+ #define AGPSTAT_FW (0x1 << 4)
+ #define AGPSTAT_RATE_MASK 0x7
+ /* AGP 3.0 only bits */
+ #define AGPSTAT_ARQSZ_MASK (0x7 << 13)
+ #define AGPSTAT_CAL_MASK (0x7 << 10)
+ #define AGPSTAT_GART64B (0x1 << 7)
+ #define AGPSTAT_MODE3 (0x1 << 3)
+ /* rate for 2.0 mode */
+ #define AGP2_RATE_1X 0x1
+ #define AGP2_RATE_2X 0x2
+ #define AGP2_RATE_4X 0x4
+ /* rate for 3.0 mode */
+ #define AGP3_RATE_4X 0x1
+ #define AGP3_RATE_8X 0x2
+
+agpi_aperbase The base address of aperture in PCI memory space.
+agpi_apersize The size of the aperture in megabytes.
+agpi_pgtotal The maximum number of memory pages the system
+ can allocate, given the aperture size and system
+ memory size (this may differ from the maximum
+ locked memory a process can have; the latter is
+ subject to the memory resource limit imposed by
+ resource_controls(5) for each project(4)):
+
+ project.max-device-locked-memory
+
+ This value can be modified through system
+ utilities like prctl(1).
+
+agpi_pgsystem Same as pg_total.
+agpi_pgused System pages already allocated by the driver.
+
+Return Values:
+
+ EFAULT Argument copy out error
+ EINVAL Command invalid
+ 0 Success
+.fi
+.in -2
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBAGPIOC_ACQUIRE\fR\fR
+.ad
+.sp .6
+.RS 4n
+Acquire control of GART. With the exception of AGPIOC_INFO, a process must
+acquire GART before it can call other agpgart ioctl commands. Additionally,
+only processes with GRAPHICS_ACCESS privilege may access this ioctl. In the
+current agpgart implementation, GART access is exclusive, meaning that only one
+process can perform GART operations at a time. To release control over GART,
+call AGPIOC_RELEASE. This command can be called from user or kernel context.
+.sp
+The argument should be NULL.
+.sp
+Return values:
+.sp
+.ne 2
+.na
+\fBEBUSY\fR
+.ad
+.RS 9n
+GART has been acquired
+.RE
+
+.sp
+.ne 2
+.na
+\fB0\fR
+.ad
+.RS 9n
+Success.
+.RE
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBAGPIOC_RELEASE\fR\fR
+.ad
+.sp .6
+.RS 4n
+Release GART control. If a process releases GART control, it cannot perform
+additional GART operations until GART is reacquired. Note that this command
+does not free allocated memory or clear GART entries. (All clear jobs are done
+by direct calls or by closing the device). When a process exits without making
+this ioctl, the final \fBclose\fR(2) performs this automatically. This command
+can be called from user or kernel context.
+.sp
+The argument should be NULL.
+.sp
+Return values:
+.sp
+.ne 2
+.na
+\fBEPERM\fR
+.ad
+.RS 9n
+Not owner of GART.
+.RE
+
+.sp
+.ne 2
+.na
+\fB0\fR
+.ad
+.RS 9n
+Success.
+.RE
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBAGPIOC_SETUP\fR\fR
+.ad
+.sp .6
+.RS 4n
+Setup AGPCMD register. An AGPCMD register resides in both the AGP master and
+target devices. The AGPCMD register controls the working mode of the AGP master
+and target devices. Each device must be configured using the same mode. This
+command can be called from user or kernel context.
+.sp
+.in +2
+.nf
+The argument is a pointer to agp_setup_t structure:
+
+ typedef struct _agp_setup {
+ uint32_t agps_mode; /* IN: value to be set for AGPCMD */
+ } agp_setup_t;
+
+agps_mode Specifying the mode to be set. Each bit of the value may have
+ a specific meaning, please refer to AGP 2.0/3.0 specification
+ or hardware datasheets for details.
+
+ /* AGP command register bits definition */
+ #define AGPCMD_RQ_MASK 0xff000000
+ #define AGPCMD_SBAEN (0x1 << 9)
+ #define AGPCMD_AGPEN (0x1 << 8)
+ #define AGPCMD_OVER4GEN (0x1 << 5)
+ #define AGPCMD_FWEN (0x1 << 4)
+ #define AGPCMD_RATE_MASK 0x7
+ /* AGP 3.0 only bits */
+ #define AGP3_CMD_ARQSZ_MASK (0x7 << 13)
+ #define AGP3_CMD_CAL_MASK (0x7 << 10)
+ #define AGP3_CMD_GART64BEN (0x1 << 7)
+.fi
+.in -2
+
+The final values set to the AGPCMD register of the master/target devices are
+decided by the agps_mode value and AGPSTAT of the master and target devices.
+.sp
+Return Values:
+.sp
+.ne 2
+.na
+\fBEPERM\fR
+.ad
+.RS 10n
+Not owner of GART.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEFAULT\fR
+.ad
+.RS 10n
+Argument copy in error.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEINVAL\fR
+.ad
+.RS 10n
+Command invalid for non-AGP system.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEIO\fR
+.ad
+.RS 10n
+Hardware setup error.
+.RE
+
+.sp
+.ne 2
+.na
+\fB0\fR
+.ad
+.RS 10n
+Success.
+.RE
+
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBAGPIOC_ALLOCATE\fR\fR
+.ad
+.sp .6
+.RS 4n
+Allocate system memory for graphics device. This command returns a unique ID
+which can be used in subsequent operations to represent the allocated memory.
+The memory is made up of discontiguous physical pages. In rare cases, special
+memory types may be required. The allocated memory must be bound to the GART
+table before it can be used by graphics device. Graphics applications can also
+\fBmmap\fR(2) the memory to userland for data storing. Memory should be freed
+when it is no longer used by calling AGPIOC_DEALLOCATE or simply by closing the
+device. This command can be called from user or kernel context.
+.sp
+.in +2
+.nf
+The argument is a pointer to agp_allocate_t structure.
+
+ typedef struct _agp_allocate {
+ int32_t agpa_key; /* OUT:ID of allocated memory */
+ uint32_t agpa_pgcount;/* IN: no. of pages to be allocated */
+ uint32_t agpa_type;/* IN: type of memory to be allocated */
+ uint32_t agpa_physical; /* OUT: reserved */
+ } agp_allocate_t;
+.fi
+.in -2
+
+.sp
+.ne 2
+.na
+\fBagpa_key\fR
+.ad
+.RS 21n
+Unique ID of the allocated memory.
+.RE
+
+.sp
+.ne 2
+.na
+\fBagpa_pgcount\fR
+.ad
+.RS 21n
+Number of pages to be allocated. The driver currently supports only 4K pages.
+The value cannot exceed the agpi_pgtotal value returned by the AGPIOC_INFO
+ioctl and is subject to the limit of project.max-device-locked-memory. If the
+memory needed is larger than the resource limit but not larger than
+agpi_pgtotal, use
+\fBprctl\fR(1) or other system utilities to change the default value of memory
+resource limit beforehand.
+.RE
+
+.sp
+.ne 2
+.na
+\fBagpa_type\fR
+.ad
+.RS 21n
+Type of memory to be allocated. The valid value of agpa_type should be
+AGP_NORMAL. It is defined as:
+.sp
+.in +2
+.nf
+ #define AGP_NORMAL 0
+.fi
+.in -2
+
+Above, AGP_NORMAL represents the discontiguous non-cachable physical memory
+which doesn't consume kernel virtual space but can be mapped to user space by
+\fBmmap\fR(2). This command may support more type values in the future.
+.RE
+
+.sp
+.ne 2
+.na
+\fBagpa_physical\fR
+.ad
+.RS 21n
+Reserved for special uses. In normal operations, the value is undefined.
+.sp
+Return Values:
+.sp
+.ne 2
+.na
+\fBEPERM\fR
+.ad
+.RS 10n
+Not owner of GART.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEINVAL\fR
+.ad
+.RS 10n
+Argument not valid.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEFAULT\fR
+.ad
+.RS 10n
+Argument copy in/out error.
+.RE
+
+.sp
+.ne 2
+.na
+\fBENOMEM\fR
+.ad
+.RS 10n
+Memory allocation error.
+.RE
+
+.sp
+.ne 2
+.na
+\fB0\fR
+.ad
+.RS 10n
+Success.
+.RE
+
+.RE
+
+.sp
+.ne 2
+.na
+\fBAGPIOC_DEALLOCATE\fR
+.ad
+.RS 21n
+Deallocate the memory identified by a key assigned in a previous allocation. If
+the memory isn't unbound from GART, this command unbinds it automatically. The
+memory should no longer be used, and memory still mapped to userland cannot
+be deallocated. Always call AGPIOC_DEALLOCATE explicitly (instead of
+deallocating implicitly by closing the device), as the system won't carry out
+the job until the last reference to the device file is dropped. This command
+can be called from user or kernel context.
+.sp
+The input argument is a key of type int32_t; there is no output argument.
+.sp
+Return Values:
+.sp
+.ne 2
+.na
+\fBEPERM\fR
+.ad
+.RS 10n
+Not owner of GART.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEINVAL\fR
+.ad
+.RS 10n
+Key not valid or memory in use.
+.RE
+
+.sp
+.ne 2
+.na
+\fB0\fR
+.ad
+.RS 10n
+Success.
+.RE
+
+.RE
+
+.sp
+.ne 2
+.na
+\fBAGPIOC_BIND\fR
+.ad
+.RS 21n
+Bind allocated memory. This command binds the allocated memory identified
+by a key to a specific offset of the GART table, which enables GART to
+translate the aperture range at the offset to system memory. Each GART entry
+represents one physical page. If the GART range was previously bound to other
+system memory, an error is returned. Once the memory is bound, it cannot be
+bound to other offsets unless it is unbound. To unbind the memory, call
+AGPIOC_UNBIND or deallocate the memory. This command can be called from user or
+kernel context.
+.sp
+.in +2
+.nf
+The argument is a pointer to agp_bind_t structure:
+
+ typedef struct _agp_bind {
+ int32_t agpb_key; /* IN: ID of memory to be bound */
+ uint32_t agpb_pgstart; /* IN: offset in aperture */
+ } agp_bind_t;
+.fi
+.in -2
+
+.sp
+.ne 2
+.na
+\fBagpb_key\fR
+.ad
+.RS 20n
+The unique ID of the memory to be bound, which is previously allocated by
+calling AGPIOC_ALLOCATE.
+.RE
+
+.sp
+.ne 2
+.na
+\fBagpb_pgstart\fR
+.ad
+.RS 20n
+The starting page offset to be bound in aperture space.
+.RE
+
+Return Values:
+.sp
+.ne 2
+.na
+\fBEPERM\fR
+.ad
+.RS 20n
+Not owner of GART.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEFAULT\fR
+.ad
+.RS 20n
+Argument copy in error.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEINVAL\fR
+.ad
+.RS 20n
+Argument not valid.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEIO\fR
+.ad
+.RS 20n
+Binding to the GTT table of IGD devices failed.
+.RE
+
+.sp
+.ne 2
+.na
+\fB0\fR
+.ad
+.RS 20n
+Success.
+.RE
+
+.RE
+
+.sp
+.ne 2
+.na
+\fBAGPIOC_UNBIND\fR
+.ad
+.RS 21n
+Unbind memory identified by a key from the GART. This command clears the
+corresponding entries in the GART table. Only memory that is not mapped to
+userland may be unbound.
+.sp
+This ioctl command can be called from user or kernel context.
+.sp
+.in +2
+.nf
+The argument is a pointer to agp_unbind_t structure.
+
+ typedef struct _agp_unbind {
+ int32_t agpu_key; /* IN: key of memory to be unbound*/
+ uint32_t agpu_pri; /* Not used: for compat. with Xorg */
+ } agp_unbind_t;
+.fi
+.in -2
+
+.sp
+.ne 2
+.na
+\fBagpu_key\fR
+.ad
+.RS 20n
+Unique ID of the memory to be unbound which was previously bound by calling
+AGPIOC_BIND.
+.RE
+
+.sp
+.ne 2
+.na
+\fBagpu_pri\fR
+.ad
+.RS 20n
+Reserved for compatibility with X.org/XFree86, not used.
+.RE
+
+Return Values:
+.sp
+.ne 2
+.na
+\fBEPERM\fR
+.ad
+.RS 20n
+Not owner of GART.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEFAULT\fR
+.ad
+.RS 20n
+Argument copy in error.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEINVAL\fR
+.ad
+.RS 20n
+Argument not valid or memory in use.
+.RE
+
+.sp
+.ne 2
+.na
+\fBEIO\fR
+.ad
+.RS 20n
+Unbinding from the GTT table of IGD devices failed.
+.RE
+
+.sp
+.ne 2
+.na
+\fB0\fR
+.ad
+.RS 20n
+Success
+.RE
+
+.RE
+
+.RE
+
+.SH EXAMPLE
+.sp
+.LP
+Below is a sample program showing how the agpgart ioctls can be used:
+.sp
+.in +2
+.nf
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/ioccom.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/agpgart.h>
+
+#define AGP_PAGE_SIZE 4096
+
+int main(int argc, char *argv[])
+{
+ int fd, ret;
+ agp_allocate_t alloc;
+ agp_bind_t bindinfo;
+ agp_info_t agpinfo;
+ agp_setup_t modesetup;
+ int *p = NULL;
+ off_t mapoff;
+ size_t maplen;
+
+ if((fd = open(AGP_DEVICE, O_RDWR))== -1) {
+ printf("open AGP_DEVICE error with %d\en", errno);\e
+ exit(-1);
+ }
+ printf("device opened\en");
+
+ ret = ioctl(fd, AGPIOC_INFO, &agpinfo);
+ if(ret == -1) {
+ printf("Get info error %d\en", errno);
+ exit(-1);
+ }
+ printf("AGPSTAT is %x\en", agpinfo.agpi_mode);
+ printf("APBASE is %x\en", agpinfo.agpi_aperbase);
+ printf("APSIZE is %dMB\en", agpinfo.agpi_apersize);
+ printf("pg_total is %d\en", agpinfo.agpi_pgtotal);
+
+ ret = ioctl(fd, AGPIOC_ACQUIRE);
+ if(ret == -1) {
+ printf(" Acquire GART error %d\en", errno);
+ exit(-1);
+ }
+
+ modesetup.agps_mode = agpinfo.agpi_mode;
+ ret = ioctl(fd, AGPIOC_SETUP, &modesetup);
+ if(ret == -1) {
+ printf("set up AGP mode error\en", errno);
+ exit(-1);
+ }
+
+ printf("Please input the number of pages you want to allocate\en");
+ scanf("%d", &alloc.agpa_pgcount);
+ alloc.agpa_type = AGP_NORMAL;
+ ret = ioctl(fd, AGPIOC_ALLOCATE, &alloc);
+ if(ret == -1) {
+ printf("Allocate memory error %d\en", errno);
+ exit(-1);
+ }
+
+ printf("Please input the aperture page offset to bind\en");
+	scanf("%u", &bindinfo.agpb_pgstart);
+ bindinfo.agpb_key = alloc.agpa_key;
+ ret = ioctl(fd, AGPIOC_BIND, &bindinfo);
+ if(ret == -1) {
+ printf("Bind error %d\en", errno);
+ exit(-1);
+ }
+ printf("Bind successful\en");
+
+ /*
+ * Now gart aperture space from (bindinfo.agpb_pgstart) to
+ * (bindinfo.agpb_pgstart + alloc.agpa_pgcount) can be used for
+ * AGP graphics transactions
+ */
+ ...
+
+ /*
+ * mmap can allow user processes to store graphics data
+ * to the aperture space
+ */
+ maplen = alloc.agpa_pgcount * AGP_PAGE_SIZE;
+ mapoff = bindinfo.agpb_pgstart * AGP_PAGE_SIZE;
+ p = (int *)mmap((caddr_t)0, maplen, (PROT_READ | PROT_WRITE),
+ MAP_SHARED, fd, mapoff);
+ if (p == MAP_FAILED) {
+ printf("Mmap error %d\en", errno);
+ exit(-1);
+ }
+ printf("Mmap successful\en");
+ ...
+
+ /*
+ * When user processes finish access to the aperture space,
+ * unmap the memory range
+ */
+ munmap((void *)p, maplen);
+ ...
+
+ /*
+ * After finishing AGP transactions, the resources can be freed
+	 * step by step or simply by closing the device.
+ */
+ ret = ioctl(fd, AGPIOC_DEALLOCATE, alloc.agpa_key);
+ if(ret == -1) {
+ printf(" Deallocate memory error %d\en", errno);
+ exit(-1);
+ }
+
+ ret = ioctl(fd, AGPIOC_RELEASE);
+ if(ret == -1) {
+ printf(" Release GART error %d\en", errno);
+ exit(-1);
+ }
+
+ close(fd);
+}
+.fi
+.in -2
+
+.SH FILES
+.sp
+.ne 2
+.na
+\fB\fB/dev/agpgart\fR\fR
+.ad
+.sp .6
+.RS 4n
+Symbolic link to the pseudo agpgart device.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fB/platform/i86pc/kernel/drv/agpgart\fR\fR
+.ad
+.sp .6
+.RS 4n
+agpgart pseudo driver.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fB/platform/i86pc/kernel/drv/agpgart.conf\fR\fR
+.ad
+.sp .6
+.RS 4n
+Driver configuration file.
+.RE
+
+.SH ATTRIBUTES
+.sp
+.LP
+See \fBattributes\fR(5) for descriptions of the following attributes:
+.sp
+
+.sp
+.TS
+box;
+c | c
+l | l .
+ATTRIBUTE TYPE ATTRIBUTE VALUE
+_
+Architecture X86
+_
+Stability level Unstable
+.TE
+
+.SH SEE ALSO
+.sp
+.LP
+\fBprctl\fR(1), \fBkstat\fR(1M), \fBclose\fR(2), \fBioctl\fR(2), \fBopen\fR(2),
+\fBmmap\fR(2), \fBproject\fR(4), \fBprivileges\fR(5), \fBattributes\fR(5),
+\fBresource_controls\fR(5)
diff --git a/usr/src/pkg/manifests/driver-graphics-agpgart.mf b/usr/src/pkg/manifests/driver-graphics-agpgart.mf
new file mode 100644
index 0000000..59b2deb
--- /dev/null
+++ b/usr/src/pkg/manifests/driver-graphics-agpgart.mf
@@ -0,0 +1,92 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+#
+
+#
+# The default for payload-bearing actions in this package is to appear in the
+# global zone only. See the include file for greater detail, as well as
+# information about overriding the defaults.
+#
+<include global_zone_only_component>
+set name=pkg.fmri value=pkg:/driver/graphics/agpgart@$(PKGVERS)
+set name=pkg.description value="AGP GART driver for x86 workstations"
+set name=pkg.summary value="AGP GART Driver"
+set name=info.classification \
+ value=org.opensolaris.category.2008:Drivers/Display
+set name=variant.arch value=i386
+dir path=kernel group=sys
+dir path=kernel/drv group=sys
+dir path=kernel/drv/$(ARCH64) group=sys
+dir path=usr/share/man
+dir path=usr/share/man/man7i
+driver name=$(ARCH64)_gart alias=pci1022,1103 perms="* 0644 root sys"
+driver name=agpgart perms="* 0644 root sys"
+driver name=agptarget perms="* 0644 root sys" \
+ alias=pci1022,7454 \
+ alias=pci8086,1130 \
+ alias=pci8086,2560 \
+ alias=pci8086,2570 \
+ alias=pci8086,2580 \
+ alias=pci8086,2590 \
+ alias=pci8086,2770 \
+ alias=pci8086,27a0 \
+ alias=pci8086,27ac \
+ alias=pci8086,2970 \
+ alias=pci8086,2980 \
+ alias=pci8086,2990 \
+ alias=pci8086,29a0 \
+ alias=pci8086,29b0 \
+ alias=pci8086,29c0 \
+ alias=pci8086,29d0 \
+ alias=pci8086,2a00 \
+ alias=pci8086,2a10 \
+ alias=pci8086,2a40 \
+ alias=pci8086,2e00 \
+ alias=pci8086,2e10 \
+ alias=pci8086,2e20 \
+ alias=pci8086,2e30 \
+ alias=pci8086,2e40 \
+ alias=pci8086,3575 \
+ alias=pci8086,3580 \
+ alias=pci8086,40 \
+ alias=pci8086,44 \
+ alias=pci8086,62 \
+ alias=pci8086,6a \
+ alias=pci8086,7120 \
+ alias=pci8086,7122 \
+ alias=pci8086,7124
+file path=kernel/drv/$(ARCH64)/$(ARCH64)_gart group=sys
+file path=kernel/drv/$(ARCH64)/agpgart group=sys
+file path=kernel/drv/$(ARCH64)/agptarget group=sys
+file path=kernel/drv/$(ARCH64)_gart group=sys
+file path=kernel/drv/agpgart group=sys
+file path=kernel/drv/agpgart.conf group=sys
+file path=kernel/drv/agptarget group=sys
+file path=usr/share/man/man7i/agpgart_io.7i
+legacy pkg=SUNWagp desc="AGP GART driver for x86 workstations" \
+ name="AGP GART Driver"
+license cr_Sun license=cr_Sun
+license lic_CDDL license=lic_CDDL
+license usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart \
+ license=usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart
diff --git a/usr/src/pkg/manifests/driver-graphics-drm.mf b/usr/src/pkg/manifests/driver-graphics-drm.mf
new file mode 100644
index 0000000..a1c3a94
--- /dev/null
+++ b/usr/src/pkg/manifests/driver-graphics-drm.mf
@@ -0,0 +1,83 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+#
+
+#
+# The default for payload-bearing actions in this package is to appear in the
+# global zone only. See the include file for greater detail, as well as
+# information about overriding the defaults.
+#
+<include global_zone_only_component>
+set name=pkg.fmri value=pkg:/driver/graphics/drm@$(PKGVERS)
+set name=pkg.description \
+ value="Direct Rendering Manager kernel drivers and modules"
+set name=pkg.summary value="DRM Kernel Drivers"
+set name=info.classification \
+ value=org.opensolaris.category.2008:Drivers/Display
+set name=variant.arch value=i386
+dir path=kernel group=sys
+dir path=kernel/drv group=sys
+dir path=kernel/drv/$(ARCH64) group=sys
+dir path=kernel/misc group=sys
+dir path=kernel/misc/$(ARCH64) group=sys
+dir path=usr/share/man
+dir path=usr/share/man/man7d
+driver name=i915 perms="* 0644 root sys" \
+ alias=pci8086,2562 \
+ alias=pci8086,2572 \
+ alias=pci8086,2582 \
+ alias=pci8086,2592 \
+ alias=pci8086,2772 \
+ alias=pci8086,27a2 \
+ alias=pci8086,27ae \
+ alias=pci8086,2972 \
+ alias=pci8086,2982 \
+ alias=pci8086,2992 \
+ alias=pci8086,29a2 \
+ alias=pci8086,29b2 \
+ alias=pci8086,29c2 \
+ alias=pci8086,29d2 \
+ alias=pci8086,2a02 \
+ alias=pci8086,2a12 \
+ alias=pci8086,2a42 \
+ alias=pci8086,2e02.8086.2e02 \
+ alias=pci8086,2e12 \
+ alias=pci8086,2e22 \
+ alias=pci8086,2e32 \
+ alias=pci8086,2e42 \
+ alias=pci8086,42 \
+ alias=pci8086,46
+file path=kernel/drv/$(ARCH64)/i915 group=sys
+file path=kernel/drv/$(ARCH64)/radeon group=sys
+file path=kernel/drv/i915 group=sys
+file path=kernel/drv/radeon group=sys
+file path=kernel/misc/$(ARCH64)/drm group=sys mode=0755
+file path=kernel/misc/drm group=sys mode=0755
+file path=usr/share/man/man7d/i915.7d
+file path=usr/share/man/man7d/radeon.7d
+legacy pkg=SUNWdrmr desc="Direct Rendering Manager kernel drivers and modules" \
+ name="DRM Kernel Drivers, (Root)"
+license cr_Sun license=cr_Sun
+license usr/src/uts/common/io/drm/THIRDPARTYLICENSE \
+ license=usr/src/uts/common/io/drm/THIRDPARTYLICENSE
diff --git a/usr/src/pkg/manifests/system-header-header-agp.mf b/usr/src/pkg/manifests/system-header-header-agp.mf
new file mode 100644
index 0000000..bfc6bbd
--- /dev/null
+++ b/usr/src/pkg/manifests/system-header-header-agp.mf
@@ -0,0 +1,47 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri value=pkg:/system/header/header-agp@$(PKGVERS)
+set name=pkg.description value="AGP GART Header Files for x86 Workstations"
+set name=pkg.summary value="AGP GART Driver Header Files"
+set name=info.classification \
+ value=org.opensolaris.category.2008:Development/System
+set name=variant.arch value=i386
+dir path=usr group=sys
+dir path=usr/include
+dir path=usr/include/sys
+dir path=usr/include/sys/agp
+file path=usr/include/sys/agp/agp$(ARCH64)gart_io.h
+file path=usr/include/sys/agp/agpdefs.h
+file path=usr/include/sys/agp/agpgart_impl.h
+file path=usr/include/sys/agp/agpmaster_io.h
+file path=usr/include/sys/agp/agptarget_io.h
+file path=usr/include/sys/agpgart.h
+legacy pkg=SUNWagph desc="AGP GART Header Files for x86 Workstations" \
+ name="AGP GART Driver Header Files"
+license cr_Sun license=cr_Sun
+license lic_CDDL license=lic_CDDL
+license usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart \
+ license=usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart
diff --git a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE b/usr/src/uts/common/io/drm/THIRDPARTYLICENSE
new file mode 100644
index 0000000..30228d9
--- /dev/null
+++ b/usr/src/uts/common/io/drm/THIRDPARTYLICENSE
@@ -0,0 +1,314 @@
+ Solaris Direct Rendering Manager kernel drivers and modules
+
+--------------------------------------------------------------------------
+
+In addition to a Sun copyright, the following files:
+
+usr/src/uts/common/io/drm/drm.h
+usr/src/uts/common/io/drm/drmP.h
+usr/src/uts/common/io/drm/drm_agpsupport.c
+usr/src/uts/common/io/drm/drm_auth.c
+usr/src/uts/common/io/drm/drm_fops.c
+usr/src/uts/common/io/drm/drm_ioctl.c
+usr/src/uts/common/io/drm/drm_lock.c
+usr/src/uts/common/io/drm/drm_memory.c
+
+are covered by the following copyrights/license text:
+
+/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+--------------------------------------------------------------------------
+
+File:
+
+usr/src/uts/common/io/drm/drm_drawable.c
+
+is covered by the following copyrights/license text:
+
+/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+--------------------------------------------------------------------------
+
+In addition to a Sun copyright, the following files:
+
+usr/src/uts/common/io/drm/drm_irq.c
+usr/src/uts/common/io/drm/drm_pci.c
+
+are covered by the following copyrights/license text:
+
+/*
+ * Copyright 2003 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+--------------------------------------------------------------------------
+
+File:
+
+usr/src/uts/common/io/drm/drm_sarea.h
+
+is covered by the following copyrights/license text:
+
+/*
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+--------------------------------------------------------------------------
+
+In addition to a Sun copyright, the following files:
+
+usr/src/uts/common/io/drm/drm_scatter.c
+usr/src/uts/i86pc/io/drm/i915_drv.c
+
+are covered by the following copyrights/license text:
+
+/*-
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+--------------------------------------------------------------------------
+
+In addition to a Sun copyright, the following files:
+
+usr/src/uts/common/io/drm/drm_bufs.c
+usr/src/uts/common/io/drm/drm_context.c
+usr/src/uts/common/io/drm/drm_dma.c
+usr/src/uts/common/io/drm/drm_drv.c
+
+are covered by the following copyrights/license text:
+
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+--------------------------------------------------------------------------
+
+File:
+
+usr/src/uts/common/io/drm/queue.h
+
+is covered by the following copyrights/license text:
+
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+--------------------------------------------------------------------------
+
+In addition to a Sun copyright, the following files:
+
+usr/src/uts/i86pc/io/drm/i915_dma.c
+usr/src/uts/i86pc/io/drm/i915_drm.h
+usr/src/uts/i86pc/io/drm/i915_drv.h
+usr/src/uts/i86pc/io/drm/i915_irq.c
+usr/src/uts/i86pc/io/drm/i915_mem.c
+
+are covered by the following copyrights/license text:
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+--------------------------------------------------------------------------
+
+File:
+
+usr/src/uts/common/io/drm/drm_pciids.txt
+
+is not covered by any copyright.
+
+--------------------------------------------------------------------------
+
+All other files are covered by a Sun copyright and the CDDL:
+
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
diff --git a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip b/usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip
new file mode 100644
index 0000000..70fef96
--- /dev/null
+++ b/usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip
@@ -0,0 +1 @@
+DRM MODULES
diff --git a/usr/src/uts/common/io/drm/ati_pcigart.c b/usr/src/uts/common/io/drm/ati_pcigart.c
new file mode 100644
index 0000000..4c236c1
--- /dev/null
+++ b/usr/src/uts/common/io/drm/ati_pcigart.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * ati_pcigart.c -- ATI PCI GART support -*- linux-c -*-
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include "drmP.h"
+
+#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
+#define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */
+#define ATI_PCIGART_TABLE_SIZE 32768
+
+int
+drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+{
+ unsigned long pages;
+ drm_sg_mem_t *entry;
+ drm_dma_handle_t *dmah;
+ u32 *pci_gart = NULL, page_base;
+ int i, j, k;
+ int pagenum;
+ size_t bulksize;
+
+ entry = dev->sg;
+ if (entry == NULL) {
+ DRM_ERROR("no scatter/gather memory!\n");
+ return (0);
+ }
+
+ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+ /* GART table in system memory */
+ entry->dmah_gart = drm_pci_alloc(dev, ATI_PCIGART_TABLE_SIZE, 0,
+ 0xfffffffful, 1);
+ if (entry->dmah_gart == NULL) {
+ DRM_ERROR("cannot allocate PCI GART table!\n");
+ return (0);
+ }
+ gart_info->addr = (void *)entry->dmah_gart->vaddr;
+ gart_info->bus_addr = entry->dmah_gart->paddr;
+ pci_gart = (u32 *)entry->dmah_gart->vaddr;
+ } else {
+ /* GART table in framebuffer memory */
+ pci_gart = gart_info->addr;
+ }
+
+ pages = DRM_MIN(entry->pages, ATI_MAX_PCIGART_PAGES);
+ bzero(pci_gart, ATI_PCIGART_TABLE_SIZE);
+ /*CONSTCOND*/
+ ASSERT(PAGE_SIZE >= ATI_PCIGART_PAGE_SIZE);
+
+ dmah = entry->dmah_sg;
+ pagenum = 0;
+ for (i = 0; i < dmah->cookie_num; i++) {
+ bulksize = dmah->cookie.dmac_size;
+ for (k = 0; k < bulksize / PAGE_SIZE; k++) {
+ entry->busaddr[pagenum] =
+ dmah->cookie.dmac_address + k * PAGE_SIZE;
+ page_base = (u32) entry->busaddr[pagenum];
+			if (pagenum++ == pages)
+ goto out;
+ for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+ j++) {
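+				/*
+				 * A PCIE GART entry stores the bus
+				 * address shifted right by 8 with the
+				 * low flag bits (0xc) set; a plain PCI
+				 * GART entry stores the bus address
+				 * directly.
+				 */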
+ if (gart_info->is_pcie)
+ *pci_gart = (page_base >> 8) | 0xc;
+ else
+ *pci_gart = page_base;
+ pci_gart++;
+ page_base += ATI_PCIGART_PAGE_SIZE;
+ }
+ }
+ ddi_dma_nextcookie(dmah->dma_hdl, &dmah->cookie);
+ }
+
+out:
+ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+ (void) ddi_dma_sync(entry->dmah_gart->dma_hdl, 0,
+ entry->dmah_gart->real_sz, DDI_DMA_SYNC_FORDEV);
+ }
+
+ return (1);
+}
+
+/*ARGSUSED*/
+extern int
+drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+{
+ drm_dma_handle_t *dmah;
+
+ if (dev->sg == NULL) {
+ DRM_ERROR("no scatter/gather memory!\n");
+ return (0);
+ }
+ dmah = dev->sg->dmah_gart;
+ dev->sg->dmah_gart = NULL;
+ if (dmah)
+ drm_pci_free(dev, dmah);
+ return (1);
+}
diff --git a/usr/src/uts/common/io/drm/drm.h b/usr/src/uts/common/io/drm/drm.h
new file mode 100644
index 0000000..87af6ed
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm.h
@@ -0,0 +1,865 @@
+/* BEGIN CSTYLED */
+
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \mainpage
+ *
+ * The Direct Rendering Manager (DRM) is a device-independent kernel-level
+ * device driver that provides support for the XFree86 Direct Rendering
+ * Infrastructure (DRI).
+ *
+ * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
+ * ways:
+ * -# The DRM provides synchronized access to the graphics hardware via
+ * the use of an optimized two-tiered lock.
+ * -# The DRM enforces the DRI security policy for access to the graphics
+ * hardware by only allowing authenticated X11 clients access to
+ * restricted regions of memory.
+ * -# The DRM provides a generic DMA engine, complete with multiple
+ * queues and the ability to detect the need for an OpenGL context
+ * switch.
+ * -# The DRM is extensible via the use of small device-specific modules
+ * that rely extensively on the API exported by the DRM module.
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#include <sys/types32.h>
+
+#ifndef __user
+#define __user
+#endif
+
+#ifdef __GNUC__
+# define DEPRECATED __attribute__ ((deprecated))
+#else
+# define DEPRECATED
+# define __volatile__ volatile
+#endif
+
+#if defined(__linux__)
+#include <asm/ioctl.h> /* For _IO* macros */
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOC_VOID _IOC_NONE
+#define DRM_IOC_READ _IOC_READ
+#define DRM_IOC_WRITE _IOC_WRITE
+#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
+#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(IN_MODULE)
+/* Prevent name collision when including sys/ioccom.h */
+#undef ioctl
+#include <sys/ioccom.h>
+#define ioctl(a,b,c) xf86ioctl(a,b,c)
+#else
+#include <sys/ioccom.h>
+#endif /* __FreeBSD__ && xf86ioctl */
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+#define DRM_IOC_VOID IOC_VOID
+#define DRM_IOC_READ IOC_OUT
+#define DRM_IOC_WRITE IOC_IN
+#define DRM_IOC_READWRITE IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#endif
+
+/* Solaris-specific. */
+#if defined(__SOLARIS__) || defined(sun)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir, type, nr, size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used for X server compile */
+#if !defined(_KERNEL)
+#define _IO(type, nr) _IOC(_IOC_NONE, (type), (nr), 0)
+#define _IOR(type, nr, size) _IOC(_IOC_READ, (type), (nr), sizeof (size))
+#define _IOW(type, nr, size) _IOC(_IOC_WRITE, (type), (nr), sizeof (size))
+#define _IOWR(type, nr, size) _IOC(_IOC_READ|_IOC_WRITE, \
+ (type), (nr), sizeof (size))
+
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+#endif /* _KERNEL */
+
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOC_VOID IOC_VOID
+#define DRM_IOC_READ IOC_OUT
+#define DRM_IOC_WRITE IOC_IN
+#define DRM_IOC_READWRITE IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
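+
+/*
+ * Example (informational): with the shifts above an ioctl number packs
+ * as dir<<30 | size<<16 | type<<8 | nr, so DRM_IOWR(0x01, drm_unique_t)
+ * (defined below) expands to
+ * _IOC(_IOC_READ|_IOC_WRITE, 'd', 0x01, sizeof (drm_unique_t)).
+ */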
+
+#endif /* __SOLARIS__ or sun */
+#define XFREE86_VERSION(major,minor,patch,snap) \
+ ((major << 16) | (minor << 8) | patch)
+
+#ifndef CONFIG_XFREE86_VERSION
+#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
+#endif
+
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
+#define DRM_PROC_DEVICES "/proc/devices"
+#define DRM_PROC_MISC "/proc/misc"
+#define DRM_PROC_DRM "/proc/drm"
+#define DRM_DEV_DRM "/dev/drm"
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+#endif
+
+#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
+#ifdef __OpenBSD__
+#define DRM_MAJOR 81
+#endif
+#if defined(__linux__) || defined(__NetBSD__)
+#define DRM_MAJOR 226
+#endif
+#define DRM_MAX_MINOR 15
+#endif
+#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
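+
+/*
+ * Example (informational): a lock value of (_DRM_LOCK_HELD | 5) means
+ * context 5 holds the lock; _DRM_LOCKING_CONTEXT() recovers the 5.
+ */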
+
+#if defined(__linux__)
+#if defined(__KERNEL__)
+typedef __u64 drm_u64_t;
+#else
+typedef unsigned long long drm_u64_t;
+#endif
+
+typedef unsigned int drm_handle_t;
+#else
+#include <sys/types.h>
+typedef uint64_t drm_u64_t;
+typedef unsigned long long drm_handle_t; /**< To mapped regions */
+#endif
+typedef unsigned int drm_context_t; /**< GLXContext handle */
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t; /**< Magic for authentication */
+
+/**
+ * Cliprect.
+ *
+ * \warning If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+typedef struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+} drm_clip_rect_t;
+
+/**
+ * Drawable information.
+ */
+typedef struct drm_drawable_info {
+ unsigned int num_rects;
+ drm_clip_rect_t *rects;
+} drm_drawable_info_t;
+
+/**
+ * Texture region.
+ */
+typedef struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+} drm_tex_region_t;
+
+/**
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer. To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+typedef struct drm_hw_lock {
+ __volatile__ unsigned int lock; /**< lock variable */
+ char padding[60]; /**< Pad to cache line */
+} drm_hw_lock_t;
+
+/* This is beyond ugly, and only works on GCC. However, it allows me to use
+ * drm.h in places (i.e., in the X-server) where I can't use size_t. The real
+ * fix is to use uint32_t instead of size_t, but that fix will break existing
+ * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems. That *will*
+ * eventually happen, though. I chose 'unsigned long' to be the fallback type
+ * because that works on all the platforms I know about. Hopefully, the
+ * real fix will happen before that bites us.
+ */
+
+#ifdef __SIZE_TYPE__
+# define DRM_SIZE_T __SIZE_TYPE__
+#else
+#if !defined(__SOLARIS__) && !defined(sun)
+# warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!"
+#endif
+# define DRM_SIZE_T unsigned long
+#endif
+
+/**
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+typedef struct drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ DRM_SIZE_T name_len; /**< Length of name buffer */
+ char __user *name; /**< Name of driver */
+ DRM_SIZE_T date_len; /**< Length of date buffer */
+ char __user *date; /**< User-space buffer to hold date */
+ DRM_SIZE_T desc_len; /**< Length of desc buffer */
+ char __user *desc; /**< User-space buffer to hold desc */
+} drm_version_t;
+
+/**
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+typedef struct drm_unique {
+ DRM_SIZE_T unique_len; /**< Length of unique */
+ char __user *unique; /**< Unique name for driver instantiation */
+} drm_unique_t;
+
+#undef DRM_SIZE_T
+
+typedef struct drm_list {
+ int count; /**< Length of user-space structures */
+ drm_version_t __user *version;
+} drm_list_t;
+
+typedef struct drm_block {
+ int unused;
+} drm_block_t;
+
+/**
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+typedef struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+} drm_control_t;
+
+/**
+ * Type of memory to map.
+ */
+typedef enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /**< no caching, no core dump */
+ _DRM_SHM = 2, /**< shared, cached */
+ _DRM_AGP = 3, /**< AGP/GART */
+ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
+ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
+ _DRM_TTM = 6
+} drm_map_type_t;
+
+/**
+ * Memory mapping flags.
+ */
+typedef enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /**< shared, cached, locked */
+ _DRM_KERNEL = 0x08, /**< kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
+} drm_map_flags_t;
+
+typedef struct drm_ctx_priv_map {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ void *handle; /**< Handle of map */
+} drm_ctx_priv_map_t;
+
+/**
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+typedef struct drm_map {
+ unsigned long long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long long handle;
+ /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ unsigned long size; /**< Requested physical size (bytes) */
+ drm_map_type_t type; /**< Type of memory to map */
+ drm_map_flags_t flags; /**< Flags */
+ int mtrr; /**< MTRR slot used */
+ /* Private data */
+} drm_map_t;
+
+/**
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+typedef struct drm_client {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ unsigned long pid; /**< Process ID */
+ unsigned long uid; /**< User ID */
+ unsigned long magic; /**< Magic */
+ unsigned long iocs; /**< Ioctl count */
+} drm_client_t;
+
+typedef enum {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /**< Generic value */
+ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
+ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /**< IRQ */
+ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
+ _DRM_STAT_DMA, /**< DMA */
+ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /**< Missed DMA opportunity */
+ /* Add to the *END* of the list */
+} drm_stat_type_t;
+
+/**
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+typedef struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ drm_stat_type_t type;
+ } data[15];
+} drm_stats_t;
+
+/**
+ * Hardware locking flags.
+ */
+typedef enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+} drm_lock_flags_t;
+
+/**
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+typedef struct drm_lock {
+ int context;
+ drm_lock_flags_t flags;
+} drm_lock_t;
+
+/**
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+typedef enum drm_dma_flags {
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note The buffer may not yet have
+ * been processed by the hardware --
+ * getting a hardware lock with the
+ * hardware quiescent will ensure
+ * that the buffer has been
+ * processed.
+ */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+} drm_dma_flags_t;
+
+/**
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+typedef enum {
+ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
+ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
+ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+} drm_buf_flag;
+typedef struct drm_buf_desc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ drm_buf_flag flags;
+ unsigned long agp_start; /**<
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+} drm_buf_desc_t;
+
+/**
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+typedef struct drm_buf_info {
+ int count; /**< Number of buffers described in list */
+ drm_buf_desc_t __user *list; /**< List of buffer descriptions */
+} drm_buf_info_t;
+
+/**
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+typedef struct drm_buf_free {
+ int count;
+ int __user *list;
+} drm_buf_free_t;
+
+/**
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+typedef struct drm_buf_pub {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ void __user *address; /**< Address of buffer */
+} drm_buf_pub_t;
+
+/**
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+typedef struct drm_buf_map {
+ int count; /**< Length of the buffer list */
+#if defined(__cplusplus)
+ void __user *c_virtual;
+#else
+ void __user *virtual; /**< Mmap'd area in user-virtual */
+#endif
+ drm_buf_pub_t __user *list; /**< Buffer information */
+ int fd;
+} drm_buf_map_t;
+
+/**
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+typedef struct drm_dma {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int __user *send_indices; /**< List of handles to buffers */
+ int __user *send_sizes; /**< Lengths of data to send */
+ drm_dma_flags_t flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ int __user *request_indices; /**< Buffer information */
+ int __user *request_sizes;
+ int granted_count; /**< Number of buffers granted */
+} drm_dma_t;
+
+typedef enum {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+} drm_ctx_flags_t;
+
+/**
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+typedef struct drm_ctx {
+ drm_context_t handle;
+ drm_ctx_flags_t flags;
+} drm_ctx_t;
+
+/**
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+typedef struct drm_ctx_res {
+ int count;
+ drm_ctx_t __user *contexts;
+} drm_ctx_res_t;
+
+
+/**
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+typedef struct drm_draw {
+ drm_drawable_t handle;
+} drm_draw_t;
+
+/**
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+ DRM_DRAWABLE_CLIPRECTS,
+} drm_drawable_info_type_t;
+
+typedef struct drm_update_draw {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+} drm_update_draw_t;
+
+/**
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+typedef struct drm_auth {
+ drm_magic_t magic;
+} drm_auth_t;
+
+/**
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+typedef struct drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+} drm_irq_busid_t;
+
+typedef enum {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
+} drm_vblank_seq_type_t;
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
+ _DRM_VBLANK_NEXTONMISS)
+
+struct drm_wait_vblank_request {
+ drm_vblank_seq_type_t type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+ drm_vblank_seq_type_t type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+};
+
+/**
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+typedef union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+} drm_wait_vblank_t;
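+
+/*
+ * Example (informational): to wait for the next vblank, a caller would
+ * set request.type = _DRM_VBLANK_RELATIVE and request.sequence = 1,
+ * then issue DRM_IOCTL_WAIT_VBLANK; on return the reply carries the
+ * reached sequence number and a timestamp.
+ */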
+
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+typedef struct drm_modeset_ctl {
+ uint32_t crtc;
+ uint32_t cmd;
+} drm_modeset_ctl_t;
+
+/**
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+typedef struct drm_agp_mode {
+ unsigned long mode; /**< AGP mode */
+} drm_agp_mode_t;
+
+/**
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+typedef struct drm_agp_buffer {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for binding / unbinding */
+ unsigned long type; /**< Type of memory to allocate */
+ unsigned long physical; /**< Physical used by i810 */
+} drm_agp_buffer_t;
+
+/**
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+typedef struct drm_agp_binding {
+ unsigned long handle; /**< From drm_agp_buffer */
+ unsigned long offset; /**< In bytes -- will round to page boundary */
+} drm_agp_binding_t;
+
+/**
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+typedef struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /**< physical address */
+ unsigned long aperture_size; /**< bytes */
+ unsigned long memory_allowed; /**< bytes */
+ unsigned long memory_used;
+
+ /** \name PCI information */
+ /*@{ */
+ unsigned short id_vendor;
+ unsigned short id_device;
+ /*@} */
+} drm_agp_info_t;
+
+/**
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+typedef struct drm_scatter_gather {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for mapping / unmapping */
+} drm_scatter_gather_t;
+
+/**
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+typedef struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+} drm_set_version_t;
+
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+typedef struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+} drm_gem_close_t;
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+typedef struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+} drm_gem_flink_t;
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+typedef struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+} drm_gem_open_t;
+
+/**
+ * \name Ioctls Definitions
+ */
+/*@{*/
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, drm_modeset_ctl_t)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, drm_gem_close_t)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, drm_gem_flink_t)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, drm_gem_open_t)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t)
+/*@}*/
+
+/**
+ * Device specific ioctls should only be in their respective headers
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
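+
+/*
+ * Example (informational): a driver-private ioctl is defined relative
+ * to DRM_COMMAND_BASE, in the style of
+ * DRM_IOW(DRM_COMMAND_BASE + 0x00, drm_i915_init_t) for i915.
+ */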
+
+#endif /* _DRM_H_ */
diff --git a/usr/src/uts/common/io/drm/drmP.h b/usr/src/uts/common/io/drm/drmP.h
new file mode 100644
index 0000000..16c02e5
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drmP.h
@@ -0,0 +1,1103 @@
+/*
+ * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+/*
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _DRMP_H
+#define _DRMP_H
+
+#include <sys/sysmacros.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/cmn_err.h>
+#include <sys/varargs.h>
+#include <sys/pci.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/pmem.h>
+#include <sys/agpgart.h>
+#include <sys/time.h>
+#include "drm_atomic.h"
+#include "drm.h"
+#include "queue.h"
+#include "drm_linux_list.h"
+
+#ifndef __inline__
+#define __inline__ inline
+#endif
+
+#if !defined(__FUNCTION__)
+#if defined(C99)
+#define __FUNCTION__ __func__
+#else
+#define __FUNCTION__ " "
+#endif
+#endif
+
+/* DRM space units */
+#define DRM_PAGE_SHIFT PAGESHIFT
+#define DRM_PAGE_SIZE (1 << DRM_PAGE_SHIFT)
+#define DRM_PAGE_OFFSET (DRM_PAGE_SIZE - 1)
+#define DRM_PAGE_MASK ~(DRM_PAGE_SIZE - 1)
+#define DRM_MB2PAGES(x) ((x) << 8)
+#define DRM_PAGES2BYTES(x) ((x) << DRM_PAGE_SHIFT)
+#define DRM_BYTES2PAGES(x) ((x) >> DRM_PAGE_SHIFT)
+#define DRM_PAGES2KB(x) ((x) << 2)
+#define DRM_ALIGNED(offset) (((offset) & DRM_PAGE_OFFSET) == 0)
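+
+/*
+ * With 4K pages these work out as, for example, DRM_MB2PAGES(1) == 256,
+ * DRM_PAGES2BYTES(256) == 0x100000 and DRM_PAGES2KB(1) == 4.
+ */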
+
+#define PAGE_SHIFT DRM_PAGE_SHIFT
+#define PAGE_SIZE DRM_PAGE_SIZE
+
+#define DRM_MAX_INSTANCES 8
+#define DRM_DEVNODE "drm"
+#define DRM_UNOPENED 0
+#define DRM_OPENED 1
+
+#define DRM_HASH_SIZE 16 /* Size of key hash table */
+#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
+
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_BUFS 6
+#define DRM_MEM_SEGS 7
+#define DRM_MEM_PAGES 8
+#define DRM_MEM_FILES 9
+#define DRM_MEM_QUEUES 10
+#define DRM_MEM_CMDS 11
+#define DRM_MEM_MAPPINGS 12
+#define DRM_MEM_BUFLISTS 13
+#define DRM_MEM_DRMLISTS 14
+#define DRM_MEM_TOTALDRM 15
+#define DRM_MEM_BOUNDDRM 16
+#define DRM_MEM_CTXBITMAP 17
+#define DRM_MEM_STUB 18
+#define DRM_MEM_SGLISTS 19
+#define DRM_MEM_AGPLISTS 20
+#define DRM_MEM_CTXLIST 21
+#define DRM_MEM_MM 22
+#define DRM_MEM_HASHTAB 23
+#define DRM_MEM_OBJECTS 24
+
+#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+#define DRM_MAP_HASH_OFFSET 0x10000000
+#define DRM_MAP_HASH_ORDER 12
+#define DRM_OBJECT_HASH_ORDER 12
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#define DRM_MM_INIT_MAX_PAGES 256
+
+
+/* Internal types and structures */
+#define DRM_ARRAY_SIZE(x) (sizeof (x) / sizeof (x[0]))
+#define DRM_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define DRM_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+
+#define __OS_HAS_AGP 1
+
+#define DRM_DEV_MOD (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+
+#define DRM_CURRENTPID ddi_get_pid()
+#define DRM_SPINLOCK(l) mutex_enter(l)
+#define DRM_SPINUNLOCK(u) mutex_exit(u)
+#define DRM_SPINLOCK_ASSERT(l)
+#define DRM_LOCK() mutex_enter(&dev->dev_lock)
+#define DRM_UNLOCK() mutex_exit(&dev->dev_lock)
+#define DRM_LOCK_OWNED() ASSERT(mutex_owned(&dev->dev_lock))
+#define spin_lock_irqsave(l, flag) mutex_enter(l)
+#define spin_unlock_irqrestore(u, flag) mutex_exit(u)
+#define spin_lock(l) mutex_enter(l)
+#define spin_unlock(u) mutex_exit(u)
+
+
+#define	DRM_UDELAY(sec)		delay(drv_usectohz((sec) * 1000))
+#define DRM_MEMORYBARRIER()
+
+typedef struct drm_file drm_file_t;
+typedef struct drm_device drm_device_t;
+typedef struct drm_driver_info drm_driver_t;
+
+#define DRM_DEVICE drm_device_t *dev = dev1
+#define DRM_IOCTL_ARGS \
+ drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
+
+#define DRM_COPYFROM_WITH_RETURN(dest, src, size) \
+ if (ddi_copyin((src), (dest), (size), 0)) { \
+ DRM_ERROR("%s: copy from user failed", __func__); \
+ return (EFAULT); \
+ }
+
+#define DRM_COPYTO_WITH_RETURN(dest, src, size) \
+ if (ddi_copyout((src), (dest), (size), 0)) { \
+ DRM_ERROR("%s: copy to user failed", __func__); \
+ return (EFAULT); \
+ }
+
+#define DRM_COPY_FROM_USER(dest, src, size) \
+ ddi_copyin((src), (dest), (size), 0) /* flag for src */
+
+#define DRM_COPY_TO_USER(dest, src, size) \
+ ddi_copyout((src), (dest), (size), 0) /* flags for dest */
+
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+ ddi_copyin((arg2), (arg1), (arg3), 0)
+
+#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
+ ddi_copyout((arg2), arg1, arg3, 0)
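+
+/*
+ * A typical ioctl handler is shaped like the following sketch; the
+ * handler name and the drm_foo_t payload are illustrative only:
+ *
+ *	int
+ *	drm_foo_ioctl(DRM_IOCTL_ARGS)
+ *	{
+ *		DRM_DEVICE;
+ *		drm_foo_t foo;
+ *
+ *		DRM_COPYFROM_WITH_RETURN(&foo, (void *)data, sizeof (foo));
+ *		foo.value = dev->pci_device;
+ *		DRM_COPYTO_WITH_RETURN((void *)data, &foo, sizeof (foo));
+ *		return (0);
+ *	}
+ */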
+
+#define DRM_READ8(map, offset) \
+ *(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset))
+#define DRM_READ16(map, offset) \
+ *(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset))
+#define DRM_READ32(map, offset) \
+ *(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset))
+#define DRM_WRITE8(map, offset, val) \
+ *(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
+#define DRM_WRITE16(map, offset, val) \
+ *(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
+#define DRM_WRITE32(map, offset, val) \
+ *(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
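+
+/*
+ * These access a mapped BAR through its drm_local_map_t; for example
+ * (the dev_priv->mmio map and the register offset are illustrative):
+ *
+ *	uint32_t st = DRM_READ32(dev_priv->mmio, 0x2004);
+ *	DRM_WRITE32(dev_priv->mmio, 0x2004, st | 0x1);
+ */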
+
+typedef struct drm_wait_queue {
+ kcondvar_t cv;
+ kmutex_t lock;
+} wait_queue_head_t;
+
+#define DRM_INIT_WAITQUEUE(q, pri) \
+{ \
+ mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri); \
+ cv_init(&(q)->cv, NULL, CV_DRIVER, NULL); \
+}
+
+#define DRM_FINI_WAITQUEUE(q) \
+{ \
+ mutex_destroy(&(q)->lock); \
+ cv_destroy(&(q)->cv); \
+}
+
+#define DRM_WAKEUP(q) \
+{ \
+ mutex_enter(&(q)->lock); \
+ cv_broadcast(&(q)->cv); \
+ mutex_exit(&(q)->lock); \
+}
+
+#define jiffies ddi_get_lbolt()
+
+#define DRM_WAIT_ON(ret, q, timeout, condition) \
+ mutex_enter(&(q)->lock); \
+ while (!(condition)) { \
+ ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
+ TR_CLOCK_TICK); \
+ if (ret == -1) { \
+ ret = EBUSY; \
+ break; \
+ } else if (ret == 0) { \
+ ret = EINTR; \
+ break; \
+ } else { \
+ ret = 0; \
+ } \
+ } \
+ mutex_exit(&(q)->lock);
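+
+/*
+ * Typical DRM_WAIT_ON usage; the queue, crtc and sequence here are
+ * driver-defined (sketch):
+ *
+ *	int ret = 0;
+ *	DRM_WAIT_ON(ret, &dev->vbl_queue, DRM_HZ,
+ *	    (drm_vblank_count(dev, crtc) >= sequence));
+ *
+ * On return, ret is 0 on wakeup, EBUSY on timeout and EINTR when the
+ * wait was interrupted by a signal.
+ */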
+
+#define DRM_WAIT(ret, q, condition) \
+mutex_enter(&(q)->lock); \
+if (!(condition)) { \
+ ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 30 * DRM_HZ); \
+ if (ret == -1) { \
+		/* gfx may have hung */ \
+ if (!(condition)) \
+ ret = -2; \
+ } else { \
+ ret = 0; \
+ } \
+} \
+mutex_exit(&(q)->lock);
+
+
+#define DRM_GETSAREA() \
+{ \
+ drm_local_map_t *map; \
+ DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
+ TAILQ_FOREACH(map, &dev->maplist, link) { \
+ if (map->type == _DRM_SHM && \
+ map->flags & _DRM_CONTAINS_LOCK) { \
+ dev_priv->sarea = map; \
+ break; \
+ } \
+ } \
+}
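+
+/*
+ * DRM_GETSAREA expands in the caller's scope and assigns to a local
+ * `dev_priv', so a driver setup path uses it roughly as below
+ * (drm_foo_private_t and its sarea field are illustrative):
+ *
+ *	drm_foo_private_t *dev_priv = dev->dev_private;
+ *
+ *	DRM_LOCK();
+ *	DRM_GETSAREA();
+ *	DRM_UNLOCK();
+ *	if (dev_priv->sarea == NULL)
+ *		return (EINVAL);
+ */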
+
+#define LOCK_TEST_WITH_RETURN(dev, fpriv) \
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
+ dev->lock.filp != fpriv) { \
+ DRM_DEBUG("%s called without lock held", __func__); \
+ return (EINVAL); \
+ }
+
+#define DRM_IRQ_ARGS caddr_t arg
+#define IRQ_HANDLED DDI_INTR_CLAIMED
+#define IRQ_NONE DDI_INTR_UNCLAIMED
+
+enum {
+ DRM_IS_NOT_AGP,
+ DRM_IS_AGP,
+ DRM_MIGHT_BE_AGP
+};
+
+/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
+#ifndef PCIY_AGP
+#define PCIY_AGP 0x02
+#endif
+
+#ifndef PCIY_EXPRESS
+#define PCIY_EXPRESS 0x10
+#endif
+
+#define PAGE_ALIGN(addr) (((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
+#define DRM_SUSER(p) (crgetsgid(p) == 0 || crgetsuid(p) == 0)
+
+#define DRM_GEM_OBJIDR_HASHNODE 1024
+#define idr_list_for_each(entry, head) \
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
+ list_for_each(entry, &(head)->next[key])
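+
+/*
+ * For example, walking every GEM handle owned by a file (sketch):
+ *
+ *	struct idr_list *entry;
+ *
+ *	idr_list_for_each(entry, &fpriv->object_idr)
+ *		DRM_DEBUG("handle %u -> obj %p", entry->handle, entry->obj);
+ */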
+
+/*
+ * wait for 400 milliseconds
+ */
+#define DRM_HZ drv_usectohz(400000)
+
+typedef unsigned long dma_addr_t;
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef uint_t irqreturn_t;
+
+#define DRM_SUPPORT 1
+#define DRM_UNSUPPORT 0
+
+
+typedef struct drm_pci_id_list
+{
+ int vendor;
+ int device;
+ long driver_private;
+ char *name;
+} drm_pci_id_list_t;
+
+#define DRM_AUTH 0x1
+#define DRM_MASTER 0x2
+#define DRM_ROOT_ONLY 0x4
+typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
+typedef struct drm_ioctl_desc {
+ int (*func)(DRM_IOCTL_ARGS);
+ int flags;
+} drm_ioctl_desc_t;
+
+typedef struct drm_magic_entry {
+ drm_magic_t magic;
+ struct drm_file *priv;
+ struct drm_magic_entry *next;
+} drm_magic_entry_t;
+
+typedef struct drm_magic_head {
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
+} drm_magic_head_t;
+
+typedef struct drm_buf {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int order; /* log-base-2(total) */
+ int used; /* Amount of buffer in use (for DMA) */
+ unsigned long offset; /* Byte offset (used internally) */
+ void *address; /* Address of buffer */
+ unsigned long bus_address; /* Bus address of buffer */
+ struct drm_buf *next; /* Kernel-only: used for free list */
+ volatile int pending; /* On hardware DMA queue */
+ drm_file_t *filp;
+ /* Uniq. identifier of holding process */
+ int context; /* Kernel queue for this buffer */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /* Which list we're on */
+
+	int dev_priv_size;	/* Size of buffer private storage */
+ void *dev_private; /* Per-buffer private storage */
+} drm_buf_t;
+
+typedef struct drm_freelist {
+ int initialized; /* Freelist in use */
+ uint32_t count; /* Number of free buffers */
+ drm_buf_t *next; /* End pointer */
+
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+} drm_freelist_t;
+
+typedef struct drm_buf_entry {
+ int buf_size;
+ int buf_count;
+ drm_buf_t *buflist;
+ int seg_count;
+ int page_order;
+
+ uint32_t *seglist;
+ unsigned long *seglist_bus;
+
+ drm_freelist_t freelist;
+} drm_buf_entry_t;
+
+typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+
+/* BEGIN CSTYLED */
+typedef struct drm_local_map {
+ unsigned long offset; /* Physical address (0 for SAREA) */
+ unsigned long size; /* Physical size (bytes) */
+ drm_map_type_t type; /* Type of memory mapped */
+ drm_map_flags_t flags; /* Flags */
+ void *handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* Boolean: MTRR used */
+ /* Private data */
+ int rid; /* PCI resource ID for bus_space */
+ int kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
+ caddr_t dev_addr; /* base device address */
+ ddi_acc_handle_t dev_handle; /* The data access handle */
+ ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free */
+ TAILQ_ENTRY(drm_local_map) link;
+} drm_local_map_t;
+/* END CSTYLED */
+
+/*
+ * This structure defines the GEM object, which the DRM uses for its
+ * buffer objects.
+ */
+struct drm_gem_object {
+ /* Reference count of this object */
+ atomic_t refcount;
+
+ /* Handle count of this object. Each handle also holds a reference */
+ atomic_t handlecount;
+
+ /* Related drm device */
+ struct drm_device *dev;
+
+ int flink;
+ /*
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /*
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /*
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /*
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+
+ drm_local_map_t *map;
+ ddi_dma_handle_t dma_hdl;
+ ddi_acc_handle_t acc_hdl;
+ caddr_t kaddr;
+ size_t real_size; /* real size of memory */
+ pfn_t *pfnarray;
+};
+
+struct idr_list {
+ struct idr_list *next, *prev;
+ struct drm_gem_object *obj;
+ uint32_t handle;
+ caddr_t contain_ptr;
+};
+
+struct drm_file {
+ TAILQ_ENTRY(drm_file) link;
+ int authenticated;
+ int master;
+ int minor;
+ pid_t pid;
+ uid_t uid;
+ int refs;
+ drm_magic_t magic;
+ unsigned long ioctl_count;
+ void *driver_priv;
+ /* Mapping of mm object handles to object pointers. */
+ struct idr_list object_idr;
+ /* Lock for synchronization of access to object_idr. */
+ kmutex_t table_lock;
+
+ dev_t dev;
+ cred_t *credp;
+};
+
+typedef struct drm_lock_data {
+ drm_hw_lock_t *hw_lock; /* Hardware lock */
+ drm_file_t *filp;
+ /* Uniq. identifier of holding process */
+ kcondvar_t lock_cv; /* lock queue - SOLARIS Specific */
+ kmutex_t lock_mutex; /* lock - SOLARIS Specific */
+ unsigned long lock_time; /* Time of last lock in clock ticks */
+} drm_lock_data_t;
+
+/*
+ * This structure, in drm_device_t, is always initialized while the device
+ * is open. dev->dma_lock protects the incrementing of dev->buf_use, which
+ * when set marks that no further bufs may be allocated until device teardown
+ * occurs (when the last open of the device has closed). The high/low
+ * watermarks of bufs are only touched by the X Server, and thus not
+ * concurrently accessed, so no locking is needed.
+ */
+typedef struct drm_device_dma {
+ drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
+ int buf_count;
+	drm_buf_t **buflist;	/* Vector of pointers into bufs */
+ int seg_count;
+ int page_count;
+ unsigned long *pagelist;
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02
+ } flags;
+} drm_device_dma_t;
+
+typedef struct drm_agp_mem {
+ void *handle;
+ unsigned long bound; /* address */
+ int pages;
+ caddr_t phys_addr;
+ struct drm_agp_mem *prev;
+ struct drm_agp_mem *next;
+} drm_agp_mem_t;
+
+typedef struct drm_agp_head {
+ agp_info_t agp_info;
+ const char *chipset;
+ drm_agp_mem_t *memory;
+ unsigned long mode;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+ ldi_ident_t agpgart_li;
+ ldi_handle_t agpgart_lh;
+} drm_agp_head_t;
+
+
+typedef struct drm_dma_handle {
+ ddi_dma_handle_t dma_hdl;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_cookie_t cookie;
+ uint_t cookie_num;
+ uintptr_t vaddr; /* virtual addr */
+ uintptr_t paddr; /* physical addr */
+ size_t real_sz; /* real size of memory */
+} drm_dma_handle_t;
+
+typedef struct drm_sg_mem {
+ unsigned long handle;
+ void *virtual;
+ int pages;
+ dma_addr_t *busaddr;
+ ddi_umem_cookie_t *umem_cookie;
+ drm_dma_handle_t *dmah_sg;
+ drm_dma_handle_t *dmah_gart; /* Handle to PCI memory */
+} drm_sg_mem_t;
+
+/*
+ * Generic memory manager structs
+ */
+
+struct drm_mm_node {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+ int free;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+};
+
+struct drm_mm {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+};
+
+typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
+
+typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
+typedef struct drm_vbl_sig {
+ TAILQ_ENTRY(drm_vbl_sig) link;
+ unsigned int sequence;
+ int signo;
+ int pid;
+} drm_vbl_sig_t;
+
+
+/* used for clone device */
+typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
+typedef struct drm_cminor {
+ TAILQ_ENTRY(drm_cminor) link;
+ drm_file_t *fpriv;
+ int minor;
+} drm_cminor_t;
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+typedef struct ati_pcigart_info {
+ int gart_table_location;
+ int is_pcie;
+ void *addr;
+ dma_addr_t bus_addr;
+ drm_local_map_t mapping;
+} drm_ati_pcigart_info;
+
+/* DRM device structure */
+struct drm_device;
+struct drm_driver_info {
+ int (*load)(struct drm_device *, unsigned long);
+ int (*firstopen)(struct drm_device *);
+ int (*open)(struct drm_device *, drm_file_t *);
+ void (*preclose)(struct drm_device *, drm_file_t *);
+ void (*postclose)(struct drm_device *, drm_file_t *);
+ void (*lastclose)(struct drm_device *);
+ int (*unload)(struct drm_device *);
+ void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
+ int (*presetup)(struct drm_device *);
+ int (*postsetup)(struct drm_device *);
+ int (*open_helper)(struct drm_device *, drm_file_t *);
+ void (*free_filp_priv)(struct drm_device *, drm_file_t *);
+ void (*release)(struct drm_device *, void *);
+ int (*dma_ioctl)(DRM_IOCTL_ARGS);
+ void (*dma_ready)(struct drm_device *);
+ int (*dma_quiescent)(struct drm_device *);
+ int (*dma_flush_block_and_flush)(struct drm_device *,
+ int, drm_lock_flags_t);
+ int (*dma_flush_unblock)(struct drm_device *, int,
+ drm_lock_flags_t);
+ int (*context_ctor)(struct drm_device *, int);
+ int (*context_dtor)(struct drm_device *, int);
+ int (*kernel_context_switch)(struct drm_device *, int, int);
+ int (*kernel_context_switch_unlock)(struct drm_device *);
+ int (*device_is_agp) (struct drm_device *);
+ int (*irq_preinstall)(struct drm_device *);
+ void (*irq_postinstall)(struct drm_device *);
+ void (*irq_uninstall)(struct drm_device *dev);
+ uint_t (*irq_handler)(DRM_IRQ_ARGS);
+ int (*vblank_wait)(struct drm_device *, unsigned int *);
+ int (*vblank_wait2)(struct drm_device *, unsigned int *);
+ /* added for intel minimized vblank */
+ u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
+ int (*enable_vblank)(struct drm_device *dev, int crtc);
+ void (*disable_vblank)(struct drm_device *dev, int crtc);
+
+ /*
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+
+ drm_ioctl_desc_t *driver_ioctls;
+ int max_driver_ioctl;
+
+ int buf_priv_size;
+ int driver_major;
+ int driver_minor;
+ int driver_patchlevel;
+ const char *driver_name; /* Simple driver name */
+ const char *driver_desc; /* Longer driver name */
+ const char *driver_date; /* Date of last major changes. */
+
+ unsigned use_agp :1;
+ unsigned require_agp :1;
+ unsigned use_sg :1;
+ unsigned use_dma :1;
+ unsigned use_pci_dma :1;
+ unsigned use_dma_queue :1;
+ unsigned use_irq :1;
+ unsigned use_vbl_irq :1;
+ unsigned use_vbl_irq2 :1;
+ unsigned use_mtrr :1;
+ unsigned use_gem;
+};
+
+/*
+ * Hardware-specific code needs to initialize mutexes that may be
+ * used in interrupt context, so it needs to know the interrupt
+ * priority.  The interrupt cookie in the drm_device structure is
+ * the intr_block field.
+ */
+#define DRM_INTR_PRI(dev) \
+ DDI_INTR_PRI((dev)->intr_block)
+
+struct drm_device {
+ drm_driver_t *driver;
+ drm_cminor_list_t minordevs;
+ dev_info_t *dip;
+ void *drm_handle;
+ int drm_supported;
+ const char *desc; /* current driver description */
+ kmutex_t *irq_mutex;
+ kcondvar_t *irq_cv;
+
+ ddi_iblock_cookie_t intr_block;
+ uint32_t pci_device; /* PCI device id */
+ uint32_t pci_vendor;
+ char *unique; /* Unique identifier: e.g., busid */
+ int unique_len; /* Length of unique field */
+ int if_version; /* Highest interface version set */
+ int flags; /* Flags to open(2) */
+
+ /* Locks */
+ kmutex_t vbl_lock; /* protects vblank operations */
+ kmutex_t dma_lock; /* protects dev->dma */
+ kmutex_t irq_lock; /* protects irq condition checks */
+ kmutex_t dev_lock; /* protects everything else */
+ drm_lock_data_t lock; /* Information on hardware lock */
+	kmutex_t struct_mutex;	/* For others */
+
+ /* Usage Counters */
+ int open_count; /* Outstanding files open */
+ int buf_use; /* Buffers in use -- cannot alloc */
+
+ /* Performance counters */
+ unsigned long counters;
+ drm_stat_type_t types[15];
+ uint32_t counts[15];
+
+ /* Authentication */
+ drm_file_list_t files;
+ drm_magic_head_t magiclist[DRM_HASH_SIZE];
+
+ /* Linked list of mappable regions. Protected by dev_lock */
+ drm_map_list_t maplist;
+
+ drm_local_map_t **context_sareas;
+ int max_context;
+
+ /* DMA queues (contexts) */
+ drm_device_dma_t *dma; /* Optional pointer for DMA support */
+
+ /* Context support */
+ int irq; /* Interrupt used by board */
+ int irq_enabled; /* True if the irq handler is enabled */
+ int pci_domain;
+ int pci_bus;
+ int pci_slot;
+ int pci_func;
+ atomic_t context_flag; /* Context swapping flag */
+ int last_context; /* Last current context */
+
+ /* Only used for Radeon */
+ atomic_t vbl_received;
+ atomic_t vbl_received2;
+
+ drm_vbl_sig_list_t vbl_sig_list;
+ drm_vbl_sig_list_t vbl_sig_list2;
+ /*
+ * At load time, disabling the vblank interrupt won't be allowed since
+ * old clients may not call the modeset ioctl and therefore misbehave.
+ * Once the modeset ioctl *has* been called though, we can safely
+ * disable them when unused.
+ */
+ int vblank_disable_allowed;
+
+ wait_queue_head_t vbl_queue; /* vbl wait channel */
+ /* vbl wait channel array */
+ wait_queue_head_t *vbl_queues;
+
+ /* number of VBLANK interrupts */
+ /* (driver must alloc the right number of counters) */
+ atomic_t *_vblank_count;
+ /* signal list to send on VBLANK */
+ struct drm_vbl_sig_list *vbl_sigs;
+
+ /* number of signals pending on all crtcs */
+ atomic_t vbl_signal_pending;
+ /* number of users of vblank interrupts per crtc */
+ atomic_t *vblank_refcount;
+ /* protected by dev->vbl_lock, used for wraparound handling */
+ u32 *last_vblank;
+	/* so we don't call enable more than once per disable */
+ atomic_t *vblank_enabled;
+ /* Display driver is setting mode */
+ int *vblank_inmodeset;
+ /* Don't wait while crtc is likely disabled */
+ int *vblank_suspend;
+ /* size of vblank counter register */
+ u32 max_vblank_count;
+ int num_crtcs;
+ kmutex_t tasklet_lock;
+ void (*locked_tasklet_func)(struct drm_device *dev);
+
+ pid_t buf_pgid;
+ drm_agp_head_t *agp;
+ drm_sg_mem_t *sg; /* Scatter gather memory */
+ uint32_t *ctx_bitmap;
+ void *dev_private;
+ unsigned int agp_buffer_token;
+ drm_local_map_t *agp_buffer_map;
+
+ kstat_t *asoft_ksp; /* kstat support */
+
+	/* \name Drawable information */
+ kmutex_t drw_lock;
+ unsigned int drw_bitfield_length;
+ u32 *drw_bitfield;
+ unsigned int drw_info_length;
+ drm_drawable_info_t **drw_info;
+
+ /* \name GEM information */
+ /* @{ */
+ kmutex_t object_name_lock;
+ struct idr_list object_name_idr;
+ atomic_t object_count;
+ atomic_t object_memory;
+ atomic_t pin_count;
+ atomic_t pin_memory;
+ atomic_t gtt_count;
+ atomic_t gtt_memory;
+ uint32_t gtt_total;
+ uint32_t invalidate_domains; /* domains pending invalidation */
+ uint32_t flush_domains; /* domains pending flush */
+ /* @} */
+
+ /*
+ * Saving S3 context
+ */
+ void *s3_private;
+};
+
+/* Memory management support (drm_memory.c) */
+void drm_mem_init(void);
+void drm_mem_uninit(void);
+void *drm_alloc(size_t, int);
+void *drm_calloc(size_t, size_t, int);
+void *drm_realloc(void *, size_t, size_t, int);
+void drm_free(void *, size_t, int);
+int drm_ioremap(drm_device_t *, drm_local_map_t *);
+void drm_ioremapfree(drm_local_map_t *);
+
+void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
+void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);
+
+void drm_pci_free(drm_device_t *, drm_dma_handle_t *);
+void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);
+
+struct drm_local_map *drm_core_findmap(struct drm_device *, unsigned long);
+
+int drm_context_switch(drm_device_t *, int, int);
+int drm_context_switch_complete(drm_device_t *, int);
+int drm_ctxbitmap_init(drm_device_t *);
+void drm_ctxbitmap_cleanup(drm_device_t *);
+void drm_ctxbitmap_free(drm_device_t *, int);
+int drm_ctxbitmap_next(drm_device_t *);
+
+/* Locking IOCTL support (drm_lock.c) */
+int drm_lock_take(drm_lock_data_t *, unsigned int);
+int drm_lock_transfer(drm_device_t *,
+ drm_lock_data_t *, unsigned int);
+int drm_lock_free(drm_device_t *,
+ volatile unsigned int *, unsigned int);
+
+/* Buffer management support (drm_bufs.c) */
+unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
+unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
+int drm_initmap(drm_device_t *, unsigned long, unsigned long,
+ unsigned int, int, int);
+void drm_rmmap(drm_device_t *, drm_local_map_t *);
+int drm_addmap(drm_device_t *, unsigned long, unsigned long,
+ drm_map_type_t, drm_map_flags_t, drm_local_map_t **);
+int drm_order(unsigned long);
+
+/* DMA support (drm_dma.c) */
+int drm_dma_setup(drm_device_t *);
+void drm_dma_takedown(drm_device_t *);
+void drm_free_buffer(drm_device_t *, drm_buf_t *);
+void drm_reclaim_buffers(drm_device_t *, drm_file_t *);
+#define drm_core_reclaim_buffers drm_reclaim_buffers
+
+/* IRQ support (drm_irq.c) */
+int drm_irq_install(drm_device_t *);
+int drm_irq_uninstall(drm_device_t *);
+uint_t drm_irq_handler(DRM_IRQ_ARGS);
+void drm_driver_irq_preinstall(drm_device_t *);
+void drm_driver_irq_postinstall(drm_device_t *);
+void drm_driver_irq_uninstall(drm_device_t *);
+int drm_vblank_wait(drm_device_t *, unsigned int *);
+void drm_vbl_send_signals(drm_device_t *);
+void drm_handle_vblank(struct drm_device *dev, int crtc);
+u32 drm_vblank_count(struct drm_device *dev, int crtc);
+int drm_vblank_get(struct drm_device *dev, int crtc);
+void drm_vblank_put(struct drm_device *dev, int crtc);
+int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+void drm_vblank_cleanup(struct drm_device *dev);
+int drm_modeset_ctl(DRM_IOCTL_ARGS);
+
+/* AGP/GART support (drm_agpsupport.c) */
+int drm_device_is_agp(drm_device_t *);
+int drm_device_is_pcie(drm_device_t *);
+drm_agp_head_t *drm_agp_init(drm_device_t *);
+void drm_agp_fini(drm_device_t *);
+int drm_agp_do_release(drm_device_t *);
+void *drm_agp_allocate_memory(size_t pages,
+ uint32_t type, drm_device_t *dev);
+int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
+int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
+int drm_agp_unbind_memory(unsigned long, drm_device_t *);
+int drm_agp_bind_pages(drm_device_t *dev,
+ pfn_t *pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset);
+int drm_agp_unbind_pages(drm_device_t *dev,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ uint32_t type);
+void drm_agp_chipset_flush(struct drm_device *dev);
+void drm_agp_rebind(struct drm_device *dev);
+
+/* kstat support (drm_kstats.c) */
+int drm_init_kstats(drm_device_t *);
+void drm_fini_kstats(drm_device_t *);
+
+/* Scatter Gather Support (drm_scatter.c) */
+void drm_sg_cleanup(drm_device_t *, drm_sg_mem_t *);
+
+/* ATI PCIGART support (ati_pcigart.c) */
+int drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
+int drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);
+
+/* Locking IOCTL support (drm_drv.c) */
+int drm_lock(DRM_IOCTL_ARGS);
+int drm_unlock(DRM_IOCTL_ARGS);
+int drm_version(DRM_IOCTL_ARGS);
+int drm_setversion(DRM_IOCTL_ARGS);
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
+
+/* Misc. IOCTL support (drm_ioctl.c) */
+int drm_irq_by_busid(DRM_IOCTL_ARGS);
+int drm_getunique(DRM_IOCTL_ARGS);
+int drm_setunique(DRM_IOCTL_ARGS);
+int drm_getmap(DRM_IOCTL_ARGS);
+int drm_getclient(DRM_IOCTL_ARGS);
+int drm_getstats(DRM_IOCTL_ARGS);
+int drm_noop(DRM_IOCTL_ARGS);
+
+/* Context IOCTL support (drm_context.c) */
+int drm_resctx(DRM_IOCTL_ARGS);
+int drm_addctx(DRM_IOCTL_ARGS);
+int drm_modctx(DRM_IOCTL_ARGS);
+int drm_getctx(DRM_IOCTL_ARGS);
+int drm_switchctx(DRM_IOCTL_ARGS);
+int drm_newctx(DRM_IOCTL_ARGS);
+int drm_rmctx(DRM_IOCTL_ARGS);
+int drm_setsareactx(DRM_IOCTL_ARGS);
+int drm_getsareactx(DRM_IOCTL_ARGS);
+
+/* Drawable IOCTL support (drm_drawable.c) */
+int drm_adddraw(DRM_IOCTL_ARGS);
+int drm_rmdraw(DRM_IOCTL_ARGS);
+int drm_update_draw(DRM_IOCTL_ARGS);
+
+/* Authentication IOCTL support (drm_auth.c) */
+int drm_getmagic(DRM_IOCTL_ARGS);
+int drm_authmagic(DRM_IOCTL_ARGS);
+int drm_remove_magic(drm_device_t *, drm_magic_t);
+drm_file_t *drm_find_file(drm_device_t *, drm_magic_t);
+/* Buffer management support (drm_bufs.c) */
+int drm_addmap_ioctl(DRM_IOCTL_ARGS);
+int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
+int drm_addbufs_ioctl(DRM_IOCTL_ARGS);
+int drm_infobufs(DRM_IOCTL_ARGS);
+int drm_markbufs(DRM_IOCTL_ARGS);
+int drm_freebufs(DRM_IOCTL_ARGS);
+int drm_mapbufs(DRM_IOCTL_ARGS);
+
+/* DMA support (drm_dma.c) */
+int drm_dma(DRM_IOCTL_ARGS);
+
+/* IRQ support (drm_irq.c) */
+int drm_control(DRM_IOCTL_ARGS);
+int drm_wait_vblank(DRM_IOCTL_ARGS);
+
+/* AGP/GART support (drm_agpsupport.c) */
+int drm_agp_acquire(DRM_IOCTL_ARGS);
+int drm_agp_release(DRM_IOCTL_ARGS);
+int drm_agp_enable(DRM_IOCTL_ARGS);
+int drm_agp_info(DRM_IOCTL_ARGS);
+int drm_agp_alloc(DRM_IOCTL_ARGS);
+int drm_agp_free(DRM_IOCTL_ARGS);
+int drm_agp_unbind(DRM_IOCTL_ARGS);
+int drm_agp_bind(DRM_IOCTL_ARGS);
+
+/* Scatter Gather Support (drm_scatter.c) */
+int drm_sg_alloc(DRM_IOCTL_ARGS);
+int drm_sg_free(DRM_IOCTL_ARGS);
+
+/* drm_mm.c */
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size, unsigned alignment);
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment, int best_match);
+
+extern void drm_mm_clean_ml(const struct drm_mm *mm);
+extern int drm_debug_flag;
+
+/* Message helpers implementing DRM_DEBUG, DRM_ERROR and DRM_INFO */
+extern void drm_debug(const char *fmt, ...);
+extern void drm_error(const char *fmt, ...);
+extern void drm_info(const char *fmt, ...);
+
+#ifdef DEBUG
+#define DRM_DEBUG if (drm_debug_flag >= 2) drm_debug
+#define DRM_INFO if (drm_debug_flag >= 1) drm_info
+#else
+#define DRM_DEBUG(...)
+#define DRM_INFO(...)
+#endif
+
+#define DRM_ERROR drm_error
+
+
+#define MAX_INSTNUMS 16
+
+extern int drm_dev_to_instance(dev_t);
+extern int drm_dev_to_minor(dev_t);
+extern void *drm_supp_register(dev_info_t *, drm_device_t *);
+extern int drm_supp_unregister(void *);
+
+extern int drm_open(drm_device_t *, drm_cminor_t *, int, int, cred_t *);
+extern int drm_close(drm_device_t *, int, int, int, cred_t *);
+extern int drm_attach(drm_device_t *);
+extern int drm_detach(drm_device_t *);
+extern int drm_probe(drm_device_t *, drm_pci_id_list_t *);
+
+extern int drm_pci_init(drm_device_t *);
+extern void drm_pci_end(drm_device_t *);
+extern int pci_get_info(drm_device_t *, int *, int *, int *);
+extern int pci_get_irq(drm_device_t *);
+extern int pci_get_vendor(drm_device_t *);
+extern int pci_get_device(drm_device_t *);
+
+extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *,
+ drm_drawable_t);
+/* File Operations helpers (drm_fops.c) */
+extern drm_file_t *drm_find_file_by_proc(drm_device_t *, cred_t *);
+extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
+extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
+ cred_t *);
+
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_object_free(struct drm_gem_object *obj);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+ size_t size);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
+
+void drm_gem_object_reference(struct drm_gem_object *obj);
+void drm_gem_object_unreference(struct drm_gem_object *obj);
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep);
+void drm_gem_object_handle_reference(struct drm_gem_object *obj);
+
+void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
+ int handle);
+int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
+int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
+int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
+void drm_gem_open(struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
+
+#endif /* _DRMP_H */
diff --git a/usr/src/uts/common/io/drm/drm_agpsupport.c b/usr/src/uts/common/io/drm/drm_agpsupport.c
new file mode 100644
index 0000000..ae695da
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_agpsupport.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * drm_agpsupport.c -- DRM support for AGP/GART backend -*- linux-c -*-
+ * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include "drm.h"
+#include "drmP.h"
+
+#ifndef AGP_PAGE_SIZE
+#define AGP_PAGE_SIZE 4096
+#define AGP_PAGE_SHIFT 12
+#endif
+
+/*
+ * The agpa_key field of struct agp_allocate_t is actually an index
+ * into an array and can be zero, but we use agpa_key as the handle
+ * returned to userland.  Since 0 is generally not a valid handle
+ * value, we add an offset to the key to form the handle.
+ */
+#define DRM_AGP_KEY_OFFSET 8
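+
+/*
+ * So, for example, an allocation whose agpa_key is 0 is handed to
+ * userland as handle 8, and the key is later recovered as
+ * (uintptr_t)handle - DRM_AGP_KEY_OFFSET.
+ */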
+
+extern int drm_supp_device_capability(void *handle, int capid);
+
+/*ARGSUSED*/
+int
+drm_device_is_agp(drm_device_t *dev)
+{
+ int ret;
+
+ if (dev->driver->device_is_agp != NULL) {
+ /*
+ * device_is_agp returns a tristate:
+ * 0 = not AGP;
+ * 1 = definitely AGP;
+ * 2 = fall back to PCI capability
+ */
+ ret = (*dev->driver->device_is_agp)(dev);
+ if (ret != DRM_MIGHT_BE_AGP)
+ return (ret);
+ }
+
+ return (drm_supp_device_capability(dev->drm_handle, PCIY_AGP));
+
+}
+
+/*ARGSUSED*/
+int
+drm_device_is_pcie(drm_device_t *dev)
+{
+ return (drm_supp_device_capability(dev->drm_handle, PCIY_EXPRESS));
+}
+
+
+/*ARGSUSED*/
+int
+drm_agp_info(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ agp_info_t *agpinfo;
+ drm_agp_info_t info;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return (EINVAL);
+
+ agpinfo = &dev->agp->agp_info;
+ info.agp_version_major = agpinfo->agpi_version.agpv_major;
+ info.agp_version_minor = agpinfo->agpi_version.agpv_minor;
+ info.mode = agpinfo->agpi_mode;
+ info.aperture_base = agpinfo->agpi_aperbase;
+	info.aperture_size = agpinfo->agpi_apersize * 1024 * 1024;
+ info.memory_allowed = agpinfo->agpi_pgtotal << PAGE_SHIFT;
+ info.memory_used = agpinfo->agpi_pgused << PAGE_SHIFT;
+ info.id_vendor = agpinfo->agpi_devid & 0xffff;
+ info.id_device = agpinfo->agpi_devid >> 16;
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &info, sizeof (info));
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_acquire(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret, rval;
+
+ if (!dev->agp) {
+		DRM_ERROR("drm_agp_acquire: agp isn't initialized yet");
+ return (ENODEV);
+ }
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_ACQUIRE,
+ (uintptr_t)0, FKIOCTL, kcred, &rval);
+ if (ret) {
+		DRM_ERROR("drm_agp_acquire: AGPIOC_ACQUIRE failed\n");
+ return (EIO);
+ }
+ dev->agp->acquired = 1;
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_release(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret, rval;
+
+ if (!dev->agp)
+ return (ENODEV);
+ if (!dev->agp->acquired)
+ return (EBUSY);
+
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RELEASE,
+ (intptr_t)0, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("drm_agp_release: AGPIOC_RELEASE failed\n");
+ return (ENXIO);
+ }
+ dev->agp->acquired = 0;
+
+ return (ret);
+}
+
+
+int
+drm_agp_do_release(drm_device_t *dev)
+{
+ int ret, rval;
+
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RELEASE,
+ (intptr_t)0, FKIOCTL, kcred, &rval);
+
+ if (ret == 0)
+ dev->agp->acquired = 0;
+
+ return (ret);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_enable(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_agp_mode_t modes;
+ agp_setup_t setup;
+ int ret, rval;
+
+ if (!dev->agp)
+ return (ENODEV);
+ if (!dev->agp->acquired)
+ return (EBUSY);
+
+ DRM_COPYFROM_WITH_RETURN(&modes, (void *)data, sizeof (modes));
+
+ dev->agp->mode = modes.mode;
+ setup.agps_mode = (uint32_t)modes.mode;
+
+
+ DRM_DEBUG("drm_agp_enable: dev->agp->mode=%lx", modes.mode);
+
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_SETUP,
+ (intptr_t)&setup, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("drm_agp_enable: failed");
+ return (EIO);
+ }
+
+ dev->agp->base = dev->agp->agp_info.agpi_aperbase;
+ dev->agp->enabled = 1;
+
+ DRM_DEBUG("drm_agp_enable: dev->agp->base=0x%lx", dev->agp->base);
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_alloc(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_agp_mem_t *entry;
+ agp_allocate_t alloc;
+ drm_agp_buffer_t request;
+ int pages;
+ int ret, rval;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return (EINVAL);
+
+ DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
+
+ entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
+
+ pages = btopr(request.size);
+ alloc.agpa_pgcount = pages;
+ alloc.agpa_type = AGP_NORMAL;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_ALLOCATE,
+ (intptr_t)&alloc, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("drm_agp_alloc: AGPIOC_ALLOCATE failed, ret=%d", ret);
+ kmem_free(entry, sizeof (*entry));
+ return (ret);
+ }
+
+ entry->bound = 0;
+ entry->pages = pages;
+ entry->handle = (void*)(uintptr_t)(alloc.agpa_key + DRM_AGP_KEY_OFFSET);
+ entry->prev = NULL;
+ entry->phys_addr = (void*)(uintptr_t)alloc.agpa_physical;
+ entry->next = dev->agp->memory;
+ if (dev->agp->memory)
+ dev->agp->memory->prev = entry;
+ dev->agp->memory = entry;
+
+ DRM_DEBUG("entry->phys_addr %lx", entry->phys_addr);
+
+ /* physical is used only by i810 driver */
+ request.physical = alloc.agpa_physical;
+ request.handle = (unsigned long)entry->handle;
+
+ /*
+	 * If ddi_copyout() fails, the allocated AGP memory will be
+	 * freed when the drm device is closed.
+ */
+ DRM_COPYTO_WITH_RETURN((void *)data, &request, sizeof (request));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static drm_agp_mem_t *
+drm_agp_lookup_entry(drm_device_t *dev, void *handle)
+{
+ drm_agp_mem_t *entry;
+
+ for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if (entry->handle == handle)
+ return (entry);
+ }
+
+ return (NULL);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_unbind(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ agp_unbind_t unbind;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int ret, rval;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return (EINVAL);
+
+ DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
+
+ if (!(entry = drm_agp_lookup_entry(dev, (void *)request.handle)))
+ return (EINVAL);
+ if (!entry->bound)
+ return (EINVAL);
+
+ unbind.agpu_pri = 0;
+ unbind.agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
+ (intptr_t)&unbind, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("drm_agp_unbind: AGPIOC_UNBIND failed");
+ return (EIO);
+ }
+ entry->bound = 0;
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_bind(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int start;
+ uint_t key;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return (EINVAL);
+
+ DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
+
+ entry = drm_agp_lookup_entry(dev, (void *)request.handle);
+ if (!entry || entry->bound)
+ return (EINVAL);
+
+ key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+ start = btopr(request.offset);
+ if (drm_agp_bind_memory(key, start, dev)) {
+ DRM_ERROR("drm_agp_bind: failed key=%x, start=0x%x, "
+ "agp_base=0x%lx", key, start, dev->agp->base);
+ return (EIO);
+ }
+
+ entry->bound = dev->agp->base + (start << AGP_PAGE_SHIFT);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_free(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+ int ret, rval;
+ int agpu_key;
+
+ DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
+ if (!dev->agp || !dev->agp->acquired)
+ return (EINVAL);
+ if (!(entry = drm_agp_lookup_entry(dev, (void *)request.handle)))
+ return (EINVAL);
+ if (entry->bound)
+ (void) drm_agp_unbind_memory(request.handle, dev);
+
+ if (entry == dev->agp->memory)
+ dev->agp->memory = entry->next;
+ if (entry->prev)
+ entry->prev->next = entry->next;
+ if (entry->next)
+ entry->next->prev = entry->prev;
+
+ agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_DEALLOCATE,
+ (intptr_t)agpu_key, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("drm_agp_free: AGPIOC_DEALLOCATE failed,"
+ "akey=%d, ret=%d", agpu_key, ret);
+ return (EIO);
+ }
+ drm_free(entry, sizeof (*entry), DRM_MEM_AGPLISTS);
+ return (0);
+}
+
+/*ARGSUSED*/
+drm_agp_head_t *
+drm_agp_init(drm_device_t *dev)
+{
+ drm_agp_head_t *agp = NULL;
+ int retval, rval;
+
+ agp = kmem_zalloc(sizeof (drm_agp_head_t), KM_SLEEP);
+
+ retval = ldi_ident_from_dip(dev->dip, &agp->agpgart_li);
+ if (retval != 0) {
+		DRM_ERROR("drm_agp_init: failed to get layered ident, retval=%d",
+ retval);
+ goto err_1;
+ }
+
+ retval = ldi_open_by_name(AGP_DEVICE, FEXCL, kcred,
+ &agp->agpgart_lh, agp->agpgart_li);
+ if (retval != 0) {
+ DRM_ERROR("drm_agp_init: failed to open %s, retval=%d",
+ AGP_DEVICE, retval);
+ goto err_2;
+ }
+
+ retval = ldi_ioctl(agp->agpgart_lh, AGPIOC_INFO,
+ (intptr_t)&agp->agp_info, FKIOCTL, kcred, &rval);
+
+ if (retval != 0) {
+ DRM_ERROR("drm_agp_init: failed to get agpinfo, retval=%d",
+ retval);
+ goto err_3;
+ }
+
+ return (agp);
+
+err_3:
+ (void) ldi_close(agp->agpgart_lh, FEXCL, kcred);
+
+err_2:
+ ldi_ident_release(agp->agpgart_li);
+
+err_1:
+ kmem_free(agp, sizeof (drm_agp_head_t));
+ return (NULL);
+}
+
+/*ARGSUSED*/
+void
+drm_agp_fini(drm_device_t *dev)
+{
+ drm_agp_head_t *agp = dev->agp;
+ (void) ldi_close(agp->agpgart_lh, FEXCL, kcred);
+ ldi_ident_release(agp->agpgart_li);
+ kmem_free(agp, sizeof (drm_agp_head_t));
+ dev->agp = NULL;
+}
+
+
+/*ARGSUSED*/
+void *
+drm_agp_allocate_memory(size_t pages, uint32_t type, drm_device_t *dev)
+{
+ return (NULL);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev)
+{
+ return (1);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_bind_memory(unsigned int key, uint32_t start, drm_device_t *dev)
+{
+ agp_bind_t bind;
+ int ret, rval;
+
+ bind.agpb_pgstart = start;
+ bind.agpb_key = key;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_BIND,
+ (intptr_t)&bind, FKIOCTL, kcred, &rval);
+ if (ret) {
+		DRM_DEBUG("drm_agp_bind_memory: AGPIOC_BIND failed");
+ return (EIO);
+ }
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_agp_unbind_memory(unsigned long handle, drm_device_t *dev)
+{
+ agp_unbind_t unbind;
+ drm_agp_mem_t *entry;
+ int ret, rval;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return (EINVAL);
+
+ entry = drm_agp_lookup_entry(dev, (void *)handle);
+ if (!entry || !entry->bound)
+ return (EINVAL);
+
+ unbind.agpu_pri = 0;
+ unbind.agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
+ (intptr_t)&unbind, FKIOCTL, kcred, &rval);
+ if (ret) {
+		DRM_ERROR("drm_agp_unbind_memory: AGPIOC_UNBIND failed");
+ return (EIO);
+ }
+ entry->bound = 0;
+ return (0);
+}
+
+/*
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+int
+drm_agp_bind_pages(drm_device_t *dev,
+ pfn_t *pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset)
+{
+
+ agp_bind_pages_t bind;
+ int ret, rval;
+
+ bind.agpb_pgstart = gtt_offset / AGP_PAGE_SIZE;
+ bind.agpb_pgcount = num_pages;
+ bind.agpb_pages = pages;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_BIND,
+ (intptr_t)&bind, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("AGPIOC_PAGES_BIND failed ret %d", ret);
+ return (ret);
+ }
+ return (0);
+}
+
+int
+drm_agp_unbind_pages(drm_device_t *dev,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ uint32_t type)
+{
+
+ agp_unbind_pages_t unbind;
+ int ret, rval;
+
+ unbind.agpb_pgstart = gtt_offset / AGP_PAGE_SIZE;
+ unbind.agpb_pgcount = num_pages;
+ unbind.agpb_type = type;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_UNBIND,
+ (intptr_t)&unbind, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_DEBUG("drm_agp_unbind_pages AGPIOC_PAGES_UNBIND failed");
+ return (ret);
+ }
+ return (0);
+}
+
+/*
+ * Certain Intel chipsets contain a global write buffer, which the drm
+ * or X.org may need to flush to make sure all data has hit RAM before
+ * initiating a GPU transfer, since the integrated graphics device is
+ * not coherent with that buffer.
+ */
+void
+drm_agp_chipset_flush(struct drm_device *dev)
+{
+ int ret, rval;
+
+ DRM_DEBUG("agp_chipset_flush");
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_FLUSHCHIPSET,
+ (intptr_t)0, FKIOCTL, kcred, &rval);
+ if (ret != 0) {
+		DRM_ERROR("drm_agp_chipset_flush: AGPIOC_FLUSHCHIPSET "
+		    "failed, ret=%d", ret);
+ }
+}
+
+/*
+ * The pages are evicted on suspend, so re-bind them at resume time.
+ */
+void
+drm_agp_rebind(struct drm_device *dev)
+{
+ int ret, rval;
+
+ if (!dev->agp) {
+ return;
+ }
+
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_REBIND,
+ (intptr_t)0, FKIOCTL, kcred, &rval);
+ if (ret != 0) {
+ DRM_ERROR("rebind failed %d", ret);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_atomic.h b/usr/src/uts/common/io/drm/drm_atomic.h
new file mode 100644
index 0000000..0adc70c
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_atomic.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * \file drm_atomic.h
+ * Atomic operations used in the DRM which may or may not be provided by the OS.
+ *
+ * \author Eric Anholt <anholt@FreeBSD.org>
+ */
+
+/*
+ * Copyright 2004 Eric Anholt
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Many of these implementations are rather fake, but good enough. */
+
+
+
+#ifndef _SYS_DRM_ATOMIC_H_
+#define _SYS_DRM_ATOMIC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/atomic.h>
+
+#ifdef __LINT__
+#undef inline
+#define inline
+#endif
+typedef uint32_t atomic_t;
+
+#define atomic_set(p, v) (*(p) = (v))
+#define atomic_read(p) (*(p))
+#define atomic_inc(p) atomic_inc_uint(p)
+#define atomic_dec(p) atomic_dec_uint(p)
+#define atomic_add(n, p) atomic_add_int(p, n)
+#define atomic_sub(n, p) atomic_add_int(p, -n)
+#define atomic_set_int(p, bits) atomic_or_uint(p, bits)
+#define atomic_clear_int(p, bits) atomic_and_uint(p, ~(bits))
+#define atomic_cmpset_int(p, c, n) \
+ ((c == atomic_cas_uint(p, c, n)) ? 1 : 0)
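+
+/*
+ * atomic_cmpset_int() follows the BSD convention: it returns 1 when
+ * the word at p still held c and was swapped to n, and 0 when the
+ * race was lost.  A lock-free increment sketch (count stands for any
+ * atomic_t in scope):
+ *
+ *	uint32_t old;
+ *	do {
+ *		old = atomic_read(&count);
+ *	} while (!atomic_cmpset_int(&count, old, old + 1));
+ */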
+
+#define set_bit(b, p) \
+ atomic_set_int(((volatile uint_t *)(void *)p) + (b >> 5), \
+ 1 << (b & 0x1f))
+
+#define clear_bit(b, p) \
+ atomic_clear_int(((volatile uint_t *)(void *)p) + (b >> 5), \
+ 1 << (b & 0x1f))
+
+#define test_bit(b, p) \
+ (((volatile uint_t *)(void *)p)[b >> 5] & (1 << (b & 0x1f)))
+
+/*
+ * Note: this routine doesn't return the old value.  It returns 0 on
+ * success, or -1 when the bit was already set.
+ */
+#ifdef _LP64
+#define test_and_set_bit(b, p) \
+ atomic_set_long_excl(((ulong_t *)(void *)p) + (b >> 6), (b & 0x3f))
+#else
+#define test_and_set_bit(b, p) \
+ atomic_set_long_excl(((ulong_t *)(void *)p) + (b >> 5), (b & 0x1f))
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_DRM_ATOMIC_H_ */
diff --git a/usr/src/uts/common/io/drm/drm_auth.c b/usr/src/uts/common/io/drm/drm_auth.c
new file mode 100644
index 0000000..23fec5e
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_auth.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * drm_auth.c -- IOCTLs for authentication -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+
+static int
+drm_hash_magic(drm_magic_t magic)
+{
+ return (magic & (DRM_HASH_SIZE-1));
+}
+
+drm_file_t *
+drm_find_file(drm_device_t *dev, drm_magic_t magic)
+{
+ drm_file_t *retval = NULL;
+ drm_magic_entry_t *pt;
+ int hash;
+
+ hash = drm_hash_magic(magic);
+ for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
+ if (pt->magic == magic) {
+ retval = pt->priv;
+ break;
+ }
+ }
+
+ return (retval);
+}
+
+static int
+drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
+{
+ int hash;
+ drm_magic_entry_t *entry;
+
+ hash = drm_hash_magic(magic);
+ entry = drm_alloc(sizeof (*entry), DRM_MEM_MAGIC);
+ if (!entry)
+ return (ENOMEM);
+ entry->magic = magic;
+ entry->priv = priv;
+ entry->next = NULL;
+
+ DRM_LOCK();
+ if (dev->magiclist[hash].tail) {
+ dev->magiclist[hash].tail->next = entry;
+ dev->magiclist[hash].tail = entry;
+ } else {
+ dev->magiclist[hash].head = entry;
+ dev->magiclist[hash].tail = entry;
+ }
+ DRM_UNLOCK();
+
+ return (0);
+}
+
+int
+drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
+{
+ drm_magic_entry_t *prev = NULL;
+ drm_magic_entry_t *pt;
+ int hash;
+
+	DRM_DEBUG("drm_remove_magic: %d", magic);
+ hash = drm_hash_magic(magic);
+
+ DRM_LOCK();
+ for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
+ if (pt->magic == magic) {
+ if (dev->magiclist[hash].head == pt) {
+ dev->magiclist[hash].head = pt->next;
+ }
+ if (dev->magiclist[hash].tail == pt) {
+ dev->magiclist[hash].tail = prev;
+ }
+ if (prev) {
+ prev->next = pt->next;
+ }
+ DRM_UNLOCK();
+ drm_free(pt, sizeof (*pt), DRM_MEM_MAGIC);
+ return (0);
+ }
+ }
+ DRM_UNLOCK();
+
+ return (EINVAL);
+}
+
+/*ARGSUSED*/
+int
+drm_getmagic(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ static drm_magic_t sequence = 0;
+ drm_auth_t auth;
+
+ /* Find unique magic */
+ if (fpriv->magic) {
+ auth.magic = fpriv->magic;
+ } else {
+ do {
+ int old = sequence;
+ auth.magic = old+1;
+ if (!atomic_cmpset_int(&sequence, old, auth.magic))
+ continue;
+ } while (drm_find_file(dev, auth.magic));
+ fpriv->magic = auth.magic;
+ (void) drm_add_magic(dev, fpriv, auth.magic);
+ }
+
+
+ DRM_DEBUG("drm_getmagic: %u", auth.magic);
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &auth, sizeof (auth));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_authmagic(DRM_IOCTL_ARGS)
+{
+ drm_auth_t auth;
+ drm_file_t *file;
+ DRM_DEVICE;
+
+ DRM_COPYFROM_WITH_RETURN(&auth, (void *)data, sizeof (auth));
+
+ if ((file = drm_find_file(dev, auth.magic))) {
+ file->authenticated = 1;
+ (void) drm_remove_magic(dev, auth.magic);
+ return (0);
+ }
+ return (EINVAL);
+}
diff --git a/usr/src/uts/common/io/drm/drm_bufs.c b/usr/src/uts/common/io/drm/drm_bufs.c
new file mode 100644
index 0000000..ec01d37
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_bufs.c
@@ -0,0 +1,897 @@
+/*
+ * drm_bufs.c -- Generic buffer template -*- linux-c -*-
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ */
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+#include <gfx_private.h>
+#include "drm_io32.h"
+
+
+#define PAGE_MASK (PAGE_SIZE-1)
+#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
+
+/*
+ * Compute the allocation order (ceiling log2) of size, e.g.
+ * drm_order(4096) == 12 and drm_order(4097) == 13.  Can be made faster.
+ */
+int
+drm_order(unsigned long size)
+{
+ int order = 0;
+ unsigned long tmp = size;
+
+	while (tmp >>= 1)
+		order++;
+
+ if (size & ~(1 << order))
+ ++order;
+
+ return (order);
+}
+
+static inline drm_local_map_t *
+drm_find_map(drm_device_t *dev, u_offset_t offset, int type)
+{
+ drm_local_map_t *map;
+
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+		if ((map->type == type) && ((map->offset == offset) ||
+		    ((map->flags == _DRM_CONTAINS_LOCK) &&
+		    (map->type == _DRM_SHM))))
+ return (map);
+ }
+
+ return (NULL);
+}
+
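+/*
+ * Establish a kernel mapping for a range of device memory: registers,
+ * frame buffer, SHM SAREA, scatter/gather or AGP space.  If an
+ * equivalent kernel-created map already exists it is reused, so ioctl
+ * and in-kernel callers share one code path.
+ */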
+int drm_addmap(drm_device_t *dev, unsigned long offset,
+ unsigned long size, drm_map_type_t type,
+ drm_map_flags_t flags, drm_local_map_t **map_ptr)
+{
+ drm_local_map_t *map;
+ caddr_t kva;
+ int retval;
+
+ /*
+ * Only allow shared memory to be removable since we only keep
+ * enough book keeping information about shared memory to allow
+ * for removal when processes fork.
+ */
+ if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
+ return (EINVAL);
+ if ((offset & PAGE_MASK) || (size & PAGE_MASK))
+ return (EINVAL);
+ if (offset + size < offset)
+ return (EINVAL);
+
+ /*
+ * Check if this is just another version of a kernel-allocated
+ * map, and just hand that back if so.
+ */
+ map = drm_find_map(dev, offset, type);
+ if (map != NULL) {
+ goto done;
+ }
+
+ /*
+ * Allocate a new map structure, fill it in, and do any
+ * type-specific initialization necessary.
+ */
+ map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
+ if (!map)
+ return (ENOMEM);
+
+ map->offset = offset;
+ map->size = size;
+ map->type = type;
+ map->flags = flags;
+
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+		retval = drm_ioremap(dev, map);
+		if (retval) {
+			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+			return (retval);
+		}
+		break;
+
+ case _DRM_SHM:
+ /*
+ * ddi_umem_alloc() grants page-aligned memory. We needn't
+ * handle alignment issue here.
+ */
+ map->handle = ddi_umem_alloc(map->size,
+ DDI_UMEM_NOSLEEP, &map->drm_umem_cookie);
+ if (!map->handle) {
+ DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
+ drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+ return (ENOMEM);
+ }
+ /*
+ * record only low 32-bit of this handle, since 32-bit
+ * user app is incapable of passing in 64bit offset when
+ * doing mmap.
+ */
+ map->offset = (uintptr_t)map->handle;
+ map->offset &= 0xffffffffUL;
+ if (map->flags & _DRM_CONTAINS_LOCK) {
+ /* Prevent a 2nd X Server from creating a 2nd lock */
+ if (dev->lock.hw_lock != NULL) {
+ ddi_umem_free(map->drm_umem_cookie);
+ drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+ return (EBUSY);
+ }
+ dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ }
+ map->dev_addr = map->handle;
+ break;
+ case _DRM_SCATTER_GATHER:
+ if (!dev->sg) {
+ drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+ return (EINVAL);
+ }
+ map->offset += (uintptr_t)dev->sg->virtual;
+ map->handle = (void *)(uintptr_t)map->offset;
+ map->dev_addr = dev->sg->virtual;
+ map->dev_handle = dev->sg->dmah_sg->acc_hdl;
+ break;
+
+	case _DRM_CONSISTENT:
+		DRM_ERROR("%d: _DRM_CONSISTENT maps not supported", __LINE__);
+		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+		return (ENOTSUP);
+ case _DRM_AGP:
+ map->offset += dev->agp->base;
+ kva = gfxp_map_kernel_space(map->offset, map->size,
+ GFXP_MEMORY_WRITECOMBINED);
+ if (kva == 0) {
+ drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+ cmn_err(CE_WARN,
+ "drm_addmap: failed to map AGP aperture");
+ return (ENOMEM);
+ }
+ map->handle = (void *)(uintptr_t)kva;
+ map->dev_addr = kva;
+ break;
+ default:
+ drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+ return (EINVAL);
+ }
+
+ TAILQ_INSERT_TAIL(&dev->maplist, map, link);
+
+done:
+	/* Reached directly when an existing kernel-created map is found. */
+ *map_ptr = map;
+
+ return (0);
+}
+
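+/*
+ * Ioctl wrapper for drm_addmap.  Under _MULTI_DATAMODEL a 32-bit
+ * client's structure layout differs from the kernel's, so arguments
+ * are copied through a 32-bit shadow structure in each direction;
+ * the same pattern recurs in the other ioctl handlers below.
+ */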
+/*ARGSUSED*/
+int
+drm_addmap_ioctl(DRM_IOCTL_ARGS)
+{
+ drm_map_t request;
+ drm_local_map_t *map;
+ int err;
+ DRM_DEVICE;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_map_32_t request32;
+ DRM_COPYFROM_WITH_RETURN(&request32,
+ (void *)data, sizeof (request32));
+ request.offset = request32.offset;
+ request.size = request32.size;
+ request.type = request32.type;
+ request.flags = request32.flags;
+ request.mtrr = request32.mtrr;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request,
+ (void *)data, sizeof (request));
+
+ err = drm_addmap(dev, request.offset, request.size, request.type,
+ request.flags, &map);
+
+ if (err != 0)
+ return (err);
+
+ request.offset = map->offset;
+ request.size = map->size;
+ request.type = map->type;
+ request.flags = map->flags;
+ request.mtrr = map->mtrr;
+ request.handle = (uintptr_t)map->handle;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_map_32_t request32;
+ request32.offset = request.offset;
+ request32.size = (uint32_t)request.size;
+ request32.type = request.type;
+ request32.flags = request.flags;
+ request32.handle = request.handle;
+ request32.mtrr = request.mtrr;
+ DRM_COPYTO_WITH_RETURN((void *)data,
+ &request32, sizeof (request32));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data,
+ &request, sizeof (request));
+
+ return (0);
+}
+
+void
+drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+{
+ DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+
+ TAILQ_REMOVE(&dev->maplist, map, link);
+
+	switch (map->type) {
+	case _DRM_REGISTERS:
+		/* FALLTHROUGH */
+	case _DRM_FRAME_BUFFER:
+		drm_ioremapfree(map);
+		break;
+ case _DRM_SHM:
+ ddi_umem_free(map->drm_umem_cookie);
+ break;
+ case _DRM_AGP:
+ /*
+ * we mapped AGP aperture into kernel space in drm_addmap,
+ * here, unmap them and release kernel virtual address space
+ */
+ gfxp_unmap_kernel_space(map->dev_addr, map->size);
+ break;
+
+ case _DRM_SCATTER_GATHER:
+ break;
+ case _DRM_CONSISTENT:
+ break;
+ default:
+ break;
+ }
+
+ drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+}
+
+/*
+ * Remove a map private from list and deallocate resources if the
+ * mapping isn't in use.
+ */
+/*ARGSUSED*/
+int
+drm_rmmap_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_local_map_t *map;
+ drm_map_t request;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_map_32_t request32;
+ DRM_COPYFROM_WITH_RETURN(&request32,
+ (void *)data, sizeof (drm_map_32_t));
+ request.offset = request32.offset;
+ request.size = request32.size;
+ request.type = request32.type;
+ request.flags = request32.flags;
+ request.handle = request32.handle;
+ request.mtrr = request32.mtrr;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request,
+ (void *)data, sizeof (request));
+
+ DRM_LOCK();
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if (((uintptr_t)map->handle == (request.handle & 0xffffffff)) &&
+ (map->flags & _DRM_REMOVABLE))
+ break;
+ }
+
+ /* No match found. */
+ if (map == NULL) {
+ DRM_UNLOCK();
+ return (EINVAL);
+ }
+
+ drm_rmmap(dev, map);
+ DRM_UNLOCK();
+
+ return (0);
+}
+
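+/*
+ * Undo a partially completed buffer allocation: release every
+ * per-buffer private area along with the buflist and seglist arrays
+ * themselves.
+ */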
+/*ARGSUSED*/
+static void
+drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
+{
+ int i;
+
+ if (entry->seg_count) {
+ for (i = 0; i < entry->seg_count; i++) {
+ if (entry->seglist[i]) {
+ DRM_ERROR(
+ "drm_cleanup_buf_error: not implemented");
+ }
+ }
+ drm_free(entry->seglist,
+ entry->seg_count *
+ sizeof (*entry->seglist), DRM_MEM_SEGS);
+ entry->seg_count = 0;
+ }
+
+ if (entry->buf_count) {
+ for (i = 0; i < entry->buf_count; i++) {
+ if (entry->buflist[i].dev_private) {
+ drm_free(entry->buflist[i].dev_private,
+ entry->buflist[i].dev_priv_size,
+ DRM_MEM_BUFS);
+ }
+ }
+ drm_free(entry->buflist,
+ entry->buf_count *
+ sizeof (*entry->buflist), DRM_MEM_BUFS);
+ entry->buflist = NULL;
+ entry->buf_count = 0;
+ }
+}
+
+/*ARGSUSED*/
+int
+drm_markbufs(DRM_IOCTL_ARGS)
+{
+ DRM_DEBUG("drm_markbufs");
+ return (EINVAL);
+}
+
+/*ARGSUSED*/
+int
+drm_infobufs(DRM_IOCTL_ARGS)
+{
+ DRM_DEBUG("drm_infobufs");
+ return (EINVAL);
+}
+
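+/*
+ * Carve request->count buffers of 2^order bytes each out of the AGP
+ * aperture, starting at request->agp_start, and append them to the
+ * device-wide buffer list.
+ */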
+static int
+drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_entry_t *entry;
+ drm_buf_t **temp_buflist;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int byte_count;
+ int i;
+
+ if (!dma)
+ return (EINVAL);
+
+ count = request->count;
+ order = drm_order(request->size);
+ size = 1 << order;
+
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+
+ byte_count = 0;
+ agp_offset = dev->agp->base + request->agp_start;
+
+ entry = &dma->bufs[order];
+
+ /* No more than one allocation per order */
+ if (entry->buf_count) {
+ return (ENOMEM);
+ }
+
+ entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ return (ENOMEM);
+ }
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ offset = 0;
+
+ while (entry->buf_count < count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ buf->offset = (dma->byte_count + offset);
+ buf->bus_address = agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->filp = NULL;
+
+ buf->dev_priv_size = dev->driver->buf_priv_size;
+ buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+ if (buf->dev_private == NULL) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ drm_cleanup_buf_error(dev, entry);
+ return (ENOMEM);
+ }
+
+ offset += alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ temp_buflist = drm_alloc(
+ (dma->buf_count + entry->buf_count) * sizeof (*dma->buflist),
+ DRM_MEM_BUFS);
+
+ if (temp_buflist == NULL) {
+ /* Free the entry because it isn't valid */
+ drm_cleanup_buf_error(dev, entry);
+ DRM_ERROR(" temp_buflist is NULL");
+ return (ENOMEM);
+ }
+
+	bcopy(dma->buflist, temp_buflist,
+	    dma->buf_count * sizeof (*dma->buflist));
+	kmem_free(dma->buflist, dma->buf_count * sizeof (*dma->buflist));
+	dma->buflist = temp_buflist;
+
+ for (i = 0; i < entry->buf_count; i++) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += byte_count >> PAGE_SHIFT;
+
+ request->count = entry->buf_count;
+ request->size = size;
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ return (0);
+}
+
+static int
+drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int byte_count;
+ int i;
+ drm_buf_t **temp_buflist;
+
+ count = request->count;
+ order = drm_order(request->size);
+ size = 1 << order;
+
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+
+ byte_count = 0;
+ agp_offset = request->agp_start;
+ entry = &dma->bufs[order];
+
+ entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
+ DRM_MEM_BUFS);
+ if (entry->buflist == NULL)
+ return (ENOMEM);
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ offset = 0;
+
+ while (entry->buf_count < count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ buf->offset = (dma->byte_count + offset);
+ buf->bus_address = agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset + dev->sg->handle);
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->filp = NULL;
+
+ buf->dev_priv_size = dev->driver->buf_priv_size;
+ buf->dev_private = drm_alloc(buf->dev_priv_size,
+ DRM_MEM_BUFS);
+ if (buf->dev_private == NULL) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ drm_cleanup_buf_error(dev, entry);
+ return (ENOMEM);
+ }
+
+ offset += alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ temp_buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof (*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof (*dma->buflist), DRM_MEM_BUFS);
+ if (!temp_buflist) {
+ drm_cleanup_buf_error(dev, entry);
+ return (ENOMEM);
+ }
+ dma->buflist = temp_buflist;
+
+ for (i = 0; i < entry->buf_count; i++) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+ request->count = entry->buf_count;
+ request->size = size;
+ dma->flags = _DRM_DMA_USE_SG;
+
+ return (0);
+}
+
+int
+drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
+{
+ int order, ret;
+
+ DRM_SPINLOCK(&dev->dma_lock);
+
+	if (request->count < 0 || request->count > 4096) {
+		DRM_SPINUNLOCK(&dev->dma_lock);
+		return (EINVAL);
+	}
+
+	order = drm_order(request->size);
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
+		DRM_SPINUNLOCK(&dev->dma_lock);
+		return (EINVAL);
+	}
+
+ /* No more allocations after first buffer-using ioctl. */
+ if (dev->buf_use != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return (EBUSY);
+ }
+ /* No more than one allocation per order */
+ if (dev->dma->bufs[order].buf_count != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return (ENOMEM);
+ }
+
+ ret = drm_do_addbufs_agp(dev, request);
+
+ DRM_SPINUNLOCK(&dev->dma_lock);
+
+ return (ret);
+}
+
+int
+drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
+{
+ int order, ret;
+
+ DRM_SPINLOCK(&dev->dma_lock);
+
+ if (request->count < 0 || request->count > 4096) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return (EINVAL);
+ }
+
+ order = drm_order(request->size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return (EINVAL);
+ }
+
+ /* No more allocations after first buffer-using ioctl. */
+ if (dev->buf_use != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return (EBUSY);
+ }
+
+ /* No more than one allocation per order */
+ if (dev->dma->bufs[order].buf_count != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return (ENOMEM);
+ }
+
+ ret = drm_do_addbufs_sg(dev, request);
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return (ret);
+}
+
+/*ARGSUSED*/
+int
+drm_addbufs_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_buf_desc_t request;
+ int err;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_buf_desc_32_t request32;
+ DRM_COPYFROM_WITH_RETURN(&request32,
+ (void *)data, sizeof (request32));
+ request.count = request32.count;
+ request.size = request32.size;
+ request.low_mark = request32.low_mark;
+ request.high_mark = request32.high_mark;
+ request.flags = request32.flags;
+ request.agp_start = request32.agp_start;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request,
+ (void *)data, sizeof (request));
+
+	if (request.flags & _DRM_AGP_BUFFER)
+		err = drm_addbufs_agp(dev, &request);
+	else if (request.flags & _DRM_SG_BUFFER)
+		err = drm_addbufs_sg(dev, &request);
+	else
+		err = EINVAL;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_buf_desc_32_t request32;
+ request32.count = request.count;
+ request32.size = request.size;
+ request32.low_mark = request.low_mark;
+ request32.high_mark = request.high_mark;
+ request32.flags = request.flags;
+ request32.agp_start = (uint32_t)request.agp_start;
+ DRM_COPYTO_WITH_RETURN((void *)data,
+ &request32, sizeof (request32));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data,
+ &request, sizeof (request));
+
+ return (err);
+}
+
+/*ARGSUSED*/
+int
+drm_freebufs(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_free_t request;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+ int retcode = 0;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_buf_free_32_t request32;
+ DRM_COPYFROM_WITH_RETURN(&request32,
+		    (void *)data, sizeof (request32));
+ request.count = request32.count;
+ request.list = (int *)(uintptr_t)request32.list;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request,
+ (void *)data, sizeof (request));
+
+ for (i = 0; i < request.count; i++) {
+ if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
+ retcode = EFAULT;
+ break;
+ }
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("drm_freebufs: Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ retcode = EINVAL;
+ break;
+ }
+ buf = dma->buflist[idx];
+ if (buf->filp != fpriv) {
+			DRM_ERROR("drm_freebufs: process %d does not "
+			    "own the buffer\n", DRM_CURRENTPID);
+ retcode = EINVAL;
+ break;
+ }
+ drm_free_buffer(dev, buf);
+ }
+
+ return (retcode);
+}
+
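+/*
+ * drm_mapbufs must establish a user mapping on behalf of the calling
+ * process, so borrow the mmap system call entry point that matches
+ * the kernel data model.
+ */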
+#ifdef _LP64
+extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
+#define drm_smmap smmap64
+#else
+#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
+extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
+#define drm_smmap smmap32
+#else
+#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
+#endif
+#endif
+
+
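+/*
+ * Map the whole pool of DMA buffers into the client with a single
+ * mmap, then copy each buffer's index, size and resulting user
+ * address out to the caller's list, honoring the 32-bit data model
+ * where necessary.
+ */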
+/*ARGSUSED*/
+int
+drm_mapbufs(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_buf_map_t request;
+ const int zero = 0;
+ unsigned long vaddr;
+ unsigned long address;
+ drm_device_dma_t *dma = dev->dma;
+ uint_t size;
+ uint_t foff;
+ int ret_tmp;
+ int i;
+
+#ifdef _MULTI_DATAMODEL
+ drm_buf_map_32_t request32;
+ drm_buf_pub_32_t *list32;
+ uint_t address32;
+
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ DRM_COPYFROM_WITH_RETURN(&request32,
+ (void *)data, sizeof (request32));
+ request.count = request32.count;
+ request.virtual = (void *)(uintptr_t)request32.virtual;
+ request.list = (drm_buf_pub_t *)(uintptr_t)request32.list;
+ request.fd = request32.fd;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request,
+ (void *)data, sizeof (request));
+
+ dev->buf_use++;
+
+ if (request.count < dma->buf_count)
+ goto done;
+
+ if ((dev->driver->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
+ (dev->driver->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
+ drm_local_map_t *map = dev->agp_buffer_map;
+ if (map == NULL)
+ return (EINVAL);
+ size = round_page(map->size);
+ foff = (uintptr_t)map->handle;
+ } else {
+ size = round_page(dma->byte_count);
+ foff = 0;
+ }
+ request.virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, request.fd, foff);
+ if (request.virtual == NULL) {
+ DRM_ERROR("drm_mapbufs: request.virtual is NULL");
+ return (EINVAL);
+ }
+
+ vaddr = (unsigned long) request.virtual;
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ list32 = (drm_buf_pub_32_t *)(uintptr_t)request32.list;
+ for (i = 0; i < dma->buf_count; i++) {
+ if (DRM_COPY_TO_USER(&list32[i].idx,
+ &dma->buflist[i]->idx, sizeof (list32[0].idx))) {
+ return (EFAULT);
+ }
+ if (DRM_COPY_TO_USER(&list32[i].total,
+ &dma->buflist[i]->total,
+ sizeof (list32[0].total))) {
+ return (EFAULT);
+ }
+ if (DRM_COPY_TO_USER(&list32[i].used,
+ &zero, sizeof (zero))) {
+ return (EFAULT);
+ }
+ address32 = vaddr + dma->buflist[i]->offset; /* *** */
+ ret_tmp = DRM_COPY_TO_USER(&list32[i].address,
+ &address32, sizeof (list32[0].address));
+ if (ret_tmp)
+ return (EFAULT);
+ }
+ goto done;
+ }
+#endif
+
+ ASSERT(ddi_model_convert_from(mode & FMODELS) != DDI_MODEL_ILP32);
+ for (i = 0; i < dma->buf_count; i++) {
+ if (DRM_COPY_TO_USER(&request.list[i].idx,
+ &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
+ return (EFAULT);
+ }
+ if (DRM_COPY_TO_USER(&request.list[i].total,
+ &dma->buflist[i]->total, sizeof (request.list[0].total))) {
+ return (EFAULT);
+ }
+ if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
+ sizeof (zero))) {
+ return (EFAULT);
+ }
+ address = vaddr + dma->buflist[i]->offset; /* *** */
+
+ ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
+ &address, sizeof (address));
+ if (ret_tmp) {
+ return (EFAULT);
+ }
+ }
+
+done:
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ request32.count = dma->buf_count;
+ request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
+ DRM_COPYTO_WITH_RETURN((void *)data,
+ &request32, sizeof (request32));
+ } else {
+#endif
+ request.count = dma->buf_count;
+ DRM_COPYTO_WITH_RETURN((void *)data,
+ &request, sizeof (request));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ return (0);
+}
diff --git a/usr/src/uts/common/io/drm/drm_cache.c b/usr/src/uts/common/io/drm/drm_cache.c
new file mode 100644
index 0000000..fe7eff0
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_cache.c
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright(c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files(the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice(including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <sys/x86_archext.h>
+#include <vm/seg_kmem.h>
+#include "drmP.h"
+
+extern void clflush_insn(caddr_t addr);
+extern void mfence_insn(void);
+
+static void
+drm_clflush_page(caddr_t page)
+{
+ unsigned int i;
+
+ if (page == NULL)
+ return;
+
+ for (i = 0; i < PAGE_SIZE; i += x86_clflush_size)
+ clflush_insn(page + i);
+ mfence_insn();
+}
+
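+/*
+ * Flush the CPU caches for an array of pages when the CLFLUSH
+ * instruction is available; the MFENCE in drm_clflush_page orders
+ * the flushes against later stores.
+ */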
+void
+drm_clflush_pages(caddr_t *pages, unsigned long num_pages)
+{
+
+ if (is_x86_feature(x86_featureset, X86FSET_CLFSH)) {
+ unsigned long i;
+
+ for (i = 0; i < num_pages; i++)
+ drm_clflush_page(pages[i]);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_context.c b/usr/src/uts/common/io/drm/drm_context.c
new file mode 100644
index 0000000..16c141f
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_context.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * drm_context.c -- IOCTLs for generic contexts -*- linux-c -*-
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ */
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+#include "drm_io32.h"
+
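+/*
+ * Scan a bitmap one 32-bit word at a time and return the index of the
+ * first clear bit, or "max" when every bit below max is set.
+ */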
+static inline int
+find_first_zero_bit(volatile void *p, int max)
+{
+ int b;
+ volatile int *ptr = (volatile int *)p;
+
+ for (b = 0; b < max; b += 32) {
+ if (ptr[b >> 5] != ~0) {
+ for (;;) {
+ if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
+ return (b);
+ b++;
+ }
+ }
+ }
+ return (max);
+}
+
+/*
+ * Context bitmap support
+ */
+void
+drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
+{
+	if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
+	    dev->ctx_bitmap == NULL) {
+		DRM_ERROR("drm_ctxbitmap_free: attempt to free "
+		    "invalid context handle: %d\n", ctx_handle);
+		return;
+	}
+
+ DRM_LOCK();
+ clear_bit(ctx_handle, dev->ctx_bitmap);
+ dev->context_sareas[ctx_handle] = NULL;
+ DRM_UNLOCK();
+}
+
+/* Is supposed to return -1 if any error by calling functions */
+int
+drm_ctxbitmap_next(drm_device_t *dev)
+{
+ int bit;
+
+ if (dev->ctx_bitmap == NULL)
+ return (-1);
+
+ DRM_LOCK();
+ bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
+ if (bit >= DRM_MAX_CTXBITMAP) {
+ DRM_UNLOCK();
+ return (-1);
+ }
+
+ set_bit(bit, dev->ctx_bitmap);
+ DRM_DEBUG("drm_ctxbitmap_next: bit : %d", bit);
+ if ((bit+1) > dev->max_context) {
+ dev->max_context = (bit+1);
+ if (dev->context_sareas != NULL) {
+ drm_local_map_t **ctx_sareas;
+ ctx_sareas = drm_realloc(dev->context_sareas,
+ (dev->max_context - 1) *
+ sizeof (*dev->context_sareas),
+ dev->max_context *
+ sizeof (*dev->context_sareas),
+ DRM_MEM_MAPS);
+ if (ctx_sareas == NULL) {
+ clear_bit(bit, dev->ctx_bitmap);
+ DRM_UNLOCK();
+ return (-1);
+ }
+ dev->context_sareas = ctx_sareas;
+ dev->context_sareas[bit] = NULL;
+ } else {
+ /* max_context == 1 at this point */
+			dev->context_sareas = drm_alloc(dev->max_context *
+			    sizeof (*dev->context_sareas), DRM_MEM_MAPS);
+ if (dev->context_sareas == NULL) {
+ clear_bit(bit, dev->ctx_bitmap);
+ DRM_UNLOCK();
+ return (-1);
+ }
+ dev->context_sareas[bit] = NULL;
+ }
+ }
+ DRM_UNLOCK();
+ DRM_DEBUG("drm_ctxbitmap_next: return %d", bit);
+ return (bit);
+}
+
+int
+drm_ctxbitmap_init(drm_device_t *dev)
+{
+ int i;
+ int temp;
+
+ DRM_LOCK();
+ dev->ctx_bitmap = drm_calloc(1, DRM_PAGE_SIZE, DRM_MEM_CTXBITMAP);
+ if (dev->ctx_bitmap == NULL) {
+ DRM_UNLOCK();
+ return (ENOMEM);
+ }
+ dev->context_sareas = NULL;
+ dev->max_context = -1;
+ DRM_UNLOCK();
+
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ temp = drm_ctxbitmap_next(dev);
+ DRM_DEBUG("drm_ctxbitmap_init : %d", temp);
+ }
+ return (0);
+}
+
+void
+drm_ctxbitmap_cleanup(drm_device_t *dev)
+{
+ DRM_LOCK();
+ if (dev->context_sareas != NULL)
+ drm_free(dev->context_sareas,
+ sizeof (*dev->context_sareas) *
+ dev->max_context,
+ DRM_MEM_MAPS);
+ drm_free(dev->ctx_bitmap, DRM_PAGE_SIZE, DRM_MEM_CTXBITMAP);
+ DRM_UNLOCK();
+}
+
+/*
+ * Per Context SAREA Support
+ */
+/*ARGSUSED*/
+int
+drm_getsareactx(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_ctx_priv_map_t request;
+ drm_local_map_t *map;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_ctx_priv_map_32_t request32;
+ DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
+ sizeof (drm_ctx_priv_map_32_t));
+ request.ctx_id = request32.ctx_id;
+ request.handle = (void *)(uintptr_t)request32.handle;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
+ sizeof (request));
+
+ DRM_LOCK();
+	if (dev->max_context < 0 ||
+	    request.ctx_id >= (unsigned)dev->max_context) {
+ DRM_UNLOCK();
+ return (EINVAL);
+ }
+
+ map = dev->context_sareas[request.ctx_id];
+ DRM_UNLOCK();
+
+ if (!map)
+ return (EINVAL);
+
+ request.handle = map->handle;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_ctx_priv_map_32_t request32;
+ request32.ctx_id = request.ctx_id;
+ request32.handle = (caddr32_t)(uintptr_t)request.handle;
+ DRM_COPYTO_WITH_RETURN((void *)data, &request32,
+ sizeof (drm_ctx_priv_map_32_t));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data,
+ &request, sizeof (request));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_setsareactx(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_ctx_priv_map_t request;
+ drm_local_map_t *map = NULL;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_ctx_priv_map_32_t request32;
+
+ DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
+ sizeof (drm_ctx_priv_map_32_t));
+ request.ctx_id = request32.ctx_id;
+ request.handle = (void *)(uintptr_t)request32.handle;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request,
+ (void *)data, sizeof (request));
+
+ DRM_LOCK();
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if (map->handle == request.handle) {
+ if (dev->max_context < 0)
+ goto bad;
+ if (request.ctx_id >= (unsigned)dev->max_context)
+ goto bad;
+ dev->context_sareas[request.ctx_id] = map;
+ DRM_UNLOCK();
+ return (0);
+ }
+ }
+
+bad:
+ DRM_UNLOCK();
+ return (EINVAL);
+}
+
+/*
+ * The actual DRM context handling routines
+ */
+int
+drm_context_switch(drm_device_t *dev, int old, int new)
+{
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("drm_context_switch: Reentering -- FIXME");
+ return (EBUSY);
+ }
+
+ DRM_DEBUG("drm_context_switch: Context switch from %d to %d",
+ old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return (0);
+ }
+
+ return (0);
+}
+
+int
+drm_context_switch_complete(drm_device_t *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR(
+ "drm_context_switch_complete: Lock not held");
+ }
+ /*
+ * If a context switch is ever initiated
+ * when the kernel holds the lock, release
+ * that lock here.
+ */
+ clear_bit(0, &dev->context_flag);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_resctx(DRM_IOCTL_ARGS)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_ctx_res_32_t res32;
+ DRM_COPYFROM_WITH_RETURN(&res32, (void *)data, sizeof (res32));
+ res.count = res32.count;
+ res.contexts = (drm_ctx_t *)(uintptr_t)res32.contexts;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&res, (void *)data, sizeof (res));
+
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ bzero(&ctx, sizeof (ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ DRM_COPYTO_WITH_RETURN(&res.contexts[i],
+ &ctx, sizeof (ctx));
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_ctx_res_32_t res32;
+ res32.count = res.count;
+ res32.contexts = (caddr32_t)(uintptr_t)res.contexts;
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &res32,
+ sizeof (drm_ctx_res_32_t));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data, &res, sizeof (res));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_addctx(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+
+ ctx.handle = drm_ctxbitmap_next(dev);
+ if (ctx.handle == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = drm_ctxbitmap_next(dev);
+ }
+ if (ctx.handle == (drm_context_t)-1) {
+ return (ENOMEM);
+ }
+
+ if (dev->driver->context_ctor && ctx.handle != DRM_KERNEL_CONTEXT) {
+ dev->driver->context_ctor(dev, ctx.handle);
+ }
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &ctx, sizeof (ctx));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_modctx(DRM_IOCTL_ARGS)
+{
+ /* This does nothing */
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_getctx(DRM_IOCTL_ARGS)
+{
+ drm_ctx_t ctx;
+
+ DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+
+ /* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &ctx, sizeof (ctx));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_switchctx(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+
+ DRM_DEBUG("drm_switchctx: %d", ctx.handle);
+ return (drm_context_switch(dev, dev->last_context, ctx.handle));
+}
+
+/*ARGSUSED*/
+int
+drm_newctx(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+
+ DRM_DEBUG("drm_newctx: %d", ctx.handle);
+ (void) drm_context_switch_complete(dev, ctx.handle);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_rmctx(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+
+ DRM_DEBUG("drm_rmctx : %d", ctx.handle);
+ if (ctx.handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor) {
+ DRM_LOCK();
+ dev->driver->context_dtor(dev, ctx.handle);
+ DRM_UNLOCK();
+ }
+
+ drm_ctxbitmap_free(dev, ctx.handle);
+ }
+
+ return (0);
+}
diff --git a/usr/src/uts/common/io/drm/drm_dma.c b/usr/src/uts/common/io/drm/drm_dma.c
new file mode 100644
index 0000000..589c486
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_dma.c
@@ -0,0 +1,157 @@
+/*
+ * drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ */
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+
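+/*
+ * Allocate the per-device DMA bookkeeping structure; dma->bufs[]
+ * holds one bucket per allocation order up to DRM_MAX_ORDER.
+ */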
+int
+drm_dma_setup(drm_device_t *dev)
+{
+ int i;
+ drm_buf_entry_t *pbuf;
+
+ dev->dma = drm_calloc(1, sizeof (*dev->dma), DRM_MEM_DMA);
+ if (dev->dma == NULL)
+ return (ENOMEM);
+
+ mutex_init(&dev->dma_lock, NULL, MUTEX_DRIVER, NULL);
+ pbuf = &(dev->dma->bufs[0]);
+ for (i = 0; i <= DRM_MAX_ORDER; i++, pbuf++)
+ bzero(pbuf, sizeof (drm_buf_entry_t));
+
+ return (0);
+}
+
+void
+drm_dma_takedown(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i, j;
+
+ if (dma == NULL)
+ return;
+
+ /* Clear dma buffers */
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].seg_count) {
+ drm_free(dma->bufs[i].seglist,
+ dma->bufs[i].seg_count *
+ sizeof (*dma->bufs[0].seglist), DRM_MEM_SEGS);
+ }
+
+ for (j = 0; j < dma->bufs[i].buf_count; j++) {
+ if (dma->bufs[i].buflist[j].dev_private) {
+ drm_free(dma->bufs[i].buflist[j].dev_private,
+ dma->bufs[i].buflist[j].dev_priv_size,
+ DRM_MEM_BUFS);
+ }
+ }
+ if (dma->bufs[i].buf_count)
+ drm_free(dma->bufs[i].buflist,
+ dma->bufs[i].buf_count *
+ sizeof (*dma->bufs[0].buflist), DRM_MEM_BUFS);
+ }
+	if (dma->buflist) {
+		drm_free(dma->buflist,
+		    dma->buf_count * sizeof (*dma->buflist),
+		    DRM_MEM_BUFS);
+	}
+
+	if (dma->pagelist) {
+		drm_free(dma->pagelist,
+		    dma->page_count * sizeof (*dma->pagelist),
+		    DRM_MEM_PAGES);
+	}
+
+	drm_free(dev->dma, sizeof (*dev->dma), DRM_MEM_DMA);
+ dev->dma = NULL;
+ mutex_destroy(&dev->dma_lock);
+}
+
+
+/*ARGSUSED*/
+void
+drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
+{
+ if (!buf)
+ return;
+
+ buf->pending = 0;
+ buf->filp = NULL;
+ buf->used = 0;
+}
+
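+/*
+ * Release any DMA buffers still owned by a closing file: idle buffers
+ * are freed immediately, queued ones are marked for later reclaim.
+ */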
+void
+drm_reclaim_buffers(drm_device_t *dev, drm_file_t *fpriv)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma)
+ return;
+ for (i = 0; i < dma->buf_count; i++) {
+ if (dma->buflist[i]->filp == fpriv) {
+ switch (dma->buflist[i]->list) {
+ case DRM_LIST_NONE:
+ drm_free_buffer(dev, dma->buflist[i]);
+ break;
+ case DRM_LIST_WAIT:
+ dma->buflist[i]->list = DRM_LIST_RECLAIM;
+ break;
+ default:
+ /* Buffer already on hardware. */
+ break;
+ }
+ }
+ }
+}
+
+/* Call into the driver-specific DMA handler */
+int
+drm_dma(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+
+ if (dev->driver->dma_ioctl) {
+ return (dev->driver->dma_ioctl(dev, data, fpriv, mode));
+ } else {
+ return (EINVAL);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_drawable.c b/usr/src/uts/common/io/drm/drm_drawable.c
new file mode 100644
index 0000000..3ccc443
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_drawable.c
@@ -0,0 +1,74 @@
+/*
+ * drm_drawable.c -- IOCTLs for drawables -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+
+/*ARGSUSED*/
+int
+drm_adddraw(DRM_IOCTL_ARGS)
+{
+ drm_draw_t draw;
+
+ draw.handle = 0; /* NOOP */
+ DRM_DEBUG("draw.handle = %d\n", draw.handle);
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &draw, sizeof (draw));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_rmdraw(DRM_IOCTL_ARGS)
+{
+ return (0);
+}
+
+/*ARGSUSED*/
+drm_drawable_info_t *
+drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id)
+{
+ return (NULL);
+}
+
+/*ARGSUSED*/
+int
+drm_update_draw(DRM_IOCTL_ARGS)
+{
+ DRM_DEBUG("drm_update_draw\n");
+ return (0);
+}
diff --git a/usr/src/uts/common/io/drm/drm_drv.c b/usr/src/uts/common/io/drm/drm_drv.c
new file mode 100644
index 0000000..ae86db5
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_drv.c
@@ -0,0 +1,577 @@
+/*
+ * drm_drv.c -- Generic driver template -*- linux-c -*-
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ */
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+
+int drm_debug_flag = 1;
+
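+/*
+ * Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd).  The flags in
+ * each entry (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY) gate which callers
+ * may invoke that ioctl.
+ */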
+#define DRIVER_IOCTL_COUNT 256
+drm_ioctl_desc_t drm_ioctls[DRIVER_IOCTL_COUNT] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] =
+ {drm_version, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] =
+ {drm_getunique, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] =
+ {drm_getmagic, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] =
+ {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] =
+ {drm_getmap, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] =
+ {drm_getclient, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] =
+ {drm_getstats, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] =
+ {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_MODESET_CTL)] =
+ {drm_modeset_ctl, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GEM_CLOSE)] =
+ {drm_gem_close_ioctl, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GEM_FLINK)] =
+ {drm_gem_flink_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_GEM_OPEN)] =
+ {drm_gem_open_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] =
+ {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] =
+ {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] =
+ {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] =
+ {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] =
+ {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] =
+ {drm_rmmap_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] =
+ {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] =
+ {drm_getsareactx, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] =
+ {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] =
+ {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] =
+ {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] =
+ {drm_getctx, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] =
+ {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] =
+ {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] =
+ {drm_resctx, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] =
+ {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] =
+ {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] =
+ {drm_lock, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] =
+ {drm_unlock, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] =
+ {drm_noop, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] =
+ {drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] =
+ {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] =
+ {drm_infobufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] =
+ {drm_mapbufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] =
+ {drm_freebufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] =
+ {drm_dma, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] =
+ {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] =
+ {drm_agp_acquire, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] =
+ {drm_agp_release, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] =
+ {drm_agp_enable, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] =
+ {drm_agp_info, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] =
+ {drm_agp_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] =
+ {drm_agp_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] =
+ {drm_agp_bind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] =
+ {drm_agp_unbind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] =
+ {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] =
+ {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] =
+ {drm_wait_vblank, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] =
+ {drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+};
+
+extern void idr_list_free(struct idr_list *head);
+
+const char *
+drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist)
+{
+ int i = 0;
+ for (i = 0; idlist[i].vendor != 0; i++) {
+ if ((idlist[i].vendor == vendor) &&
+ (idlist[i].device == device)) {
+ return (idlist[i].name);
+ }
+ }
+	return (NULL);
+}
+
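+/* Per-device initialization performed on the first open of the node. */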
+static int
+drm_firstopen(drm_device_t *dev)
+{
+ int i;
+ int retval;
+ drm_local_map_t *map;
+
+ /* prebuild the SAREA */
+ retval = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+ _DRM_CONTAINS_LOCK, &map);
+ if (retval != 0) {
+ DRM_ERROR("firstopen: failed to prebuild SAREA");
+ return (retval);
+ }
+
+ if (dev->driver->use_agp) {
+ DRM_DEBUG("drm_firstopen: use_agp=%d", dev->driver->use_agp);
+ if (drm_device_is_agp(dev))
+ dev->agp = drm_agp_init(dev);
+ if (dev->driver->require_agp && dev->agp == NULL) {
+ DRM_ERROR("couldn't initialize AGP");
+ return (EIO);
+ }
+ }
+
+ if (dev->driver->firstopen)
+ retval = dev->driver->firstopen(dev);
+
+ if (retval != 0) {
+ DRM_ERROR("drm_firstopen: driver-specific firstopen failed");
+ return (retval);
+ }
+
+ dev->buf_use = 0;
+
+ if (dev->driver->use_dma) {
+ i = drm_dma_setup(dev);
+ if (i != 0)
+ return (i);
+ }
+ dev->counters = 6;
+ dev->types[0] = _DRM_STAT_LOCK;
+ dev->types[1] = _DRM_STAT_OPENS;
+ dev->types[2] = _DRM_STAT_CLOSES;
+ dev->types[3] = _DRM_STAT_IOCTLS;
+ dev->types[4] = _DRM_STAT_LOCKS;
+ dev->types[5] = _DRM_STAT_UNLOCKS;
+
+ for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
+		dev->counts[i] = 0;
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+
+ dev->irq_enabled = 0;
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+
+ return (0);
+}
+
+/* Free resources associated with the DRM on the last close. */
+static int
+drm_lastclose(drm_device_t *dev)
+{
+ drm_magic_entry_t *pt, *next;
+ drm_local_map_t *map, *mapsave;
+ int i;
+
+ DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+
+ if (dev->driver->lastclose != NULL)
+ dev->driver->lastclose(dev);
+
+ if (dev->irq_enabled)
+ (void) drm_irq_uninstall(dev);
+
+ if (dev->unique) {
+ drm_free(dev->unique, dev->unique_len + 1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof (*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /*
+ * Remove AGP resources, but leave dev->agp
+ * intact until drm_cleanup is called.
+ */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound)
+ (void) drm_agp_unbind_memory(
+ (unsigned long)entry->handle, dev);
+ (void) drm_agp_free_memory(entry->handle, dev);
+ drm_free(entry, sizeof (*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired)
+ (void) drm_agp_do_release(dev);
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ drm_agp_fini(dev);
+ }
+
+ if (dev->sg != NULL) {
+ drm_sg_mem_t *entry;
+ entry = dev->sg;
+ dev->sg = NULL;
+ drm_sg_cleanup(dev, entry);
+ }
+
+
+ /* Clean up maps that weren't set up by the driver. */
+ TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
+ if (!map->kernel_owned)
+ drm_rmmap(dev, map);
+ }
+
+ drm_dma_takedown(dev);
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.filp = NULL;
+
+ mutex_enter(&(dev->lock.lock_mutex));
+ cv_broadcast(&(dev->lock.lock_cv));
+ mutex_exit(&(dev->lock.lock_mutex));
+ }
+
+ return (0);
+}
+
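+/*
+ * One-time setup at attach: locks, PCI identity, the driver's load
+ * hook, the context bitmap, optional GEM state and kstats.
+ */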
+static int
+drm_load(drm_device_t *dev)
+{
+ int retcode;
+
+ cv_init(&(dev->lock.lock_cv), NULL, CV_DRIVER, NULL);
+ mutex_init(&(dev->lock.lock_mutex), NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&(dev->dev_lock), "drmdev", MUTEX_DRIVER, NULL);
+ mutex_init(&dev->irq_lock, "drmirq", MUTEX_DRIVER,
+ (void *)dev->intr_block);
+ mutex_init(&dev->drw_lock, "drmdrw", MUTEX_DRIVER, NULL);
+ mutex_init(&dev->tasklet_lock, "drmtsk", MUTEX_DRIVER, NULL);
+
+ dev->irq = pci_get_irq(dev);
+ dev->pci_vendor = pci_get_vendor(dev);
+ dev->pci_device = pci_get_device(dev);
+
+ TAILQ_INIT(&dev->maplist);
+ TAILQ_INIT(&dev->minordevs);
+ TAILQ_INIT(&dev->files);
+ if (dev->driver->load != NULL) {
+ retcode = dev->driver->load(dev, 0);
+ if (retcode != 0) {
+ DRM_ERROR("drm_load: failed\n");
+ goto error;
+ }
+ }
+
+ retcode = drm_ctxbitmap_init(dev);
+ if (retcode != 0) {
+ DRM_ERROR("drm_load: Cannot allocate memory for ctx bitmap");
+ goto error;
+ }
+
+ if (dev->driver->use_gem == 1) {
+ retcode = drm_gem_init(dev);
+ if (retcode) {
+ DRM_ERROR("Cannot initialize graphics execution "
+ "manager (GEM)\n");
+ goto error;
+ }
+ }
+
+ if (drm_init_kstats(dev)) {
+ DRM_ERROR("drm_attach => drm_load: init kstats error");
+ retcode = EFAULT;
+ goto error;
+ }
+
+ DRM_INFO("!drm: Initialized %s %d.%d.%d %s ",
+ dev->driver->driver_name,
+ dev->driver->driver_major,
+ dev->driver->driver_minor,
+ dev->driver->driver_patchlevel,
+ dev->driver->driver_date);
+ return (0);
+
+error:
+ DRM_LOCK();
+ (void) drm_lastclose(dev);
+ DRM_UNLOCK();
+ cv_destroy(&(dev->lock.lock_cv));
+ mutex_destroy(&(dev->lock.lock_mutex));
+ mutex_destroy(&dev->irq_lock);
+ mutex_destroy(&(dev->dev_lock));
+ mutex_destroy(&dev->drw_lock);
+ mutex_destroy(&dev->tasklet_lock);
+
+ return (retcode);
+}
+
+/* Called when this device instance is torn down (detach). */
+static void
+drm_unload(drm_device_t *dev)
+{
+ drm_local_map_t *map;
+
+ drm_vblank_cleanup(dev);
+
+ drm_ctxbitmap_cleanup(dev);
+
+ if (dev->driver->use_gem == 1) {
+ idr_list_free(&dev->object_name_idr);
+ mutex_destroy(&dev->object_name_lock);
+ }
+
+ DRM_LOCK();
+ (void) drm_lastclose(dev);
+ DRM_UNLOCK();
+
+ while ((map = TAILQ_FIRST(&dev->maplist)) != NULL) {
+ drm_rmmap(dev, map);
+ }
+
+ if (dev->driver->unload != NULL)
+ dev->driver->unload(dev);
+
+ drm_mem_uninit();
+ cv_destroy(&dev->lock.lock_cv);
+ mutex_destroy(&dev->lock.lock_mutex);
+ mutex_destroy(&dev->irq_lock);
+ mutex_destroy(&dev->dev_lock);
+ mutex_destroy(&dev->drw_lock);
+ mutex_destroy(&dev->tasklet_lock);
+
+ dev->gtt_total = 0;
+ atomic_set(&dev->pin_memory, 0);
+ DRM_ERROR("drm_unload");
+}
+
+
+/*ARGSUSED*/
+int
+drm_open(drm_device_t *dev, drm_cminor_t *mp, int openflags,
+ int otyp, cred_t *credp)
+{
+ int retcode;
+
+ retcode = drm_open_helper(dev, mp, openflags, otyp, credp);
+
+ if (!retcode) {
+ atomic_inc_32(&dev->counts[_DRM_STAT_OPENS]);
+ DRM_LOCK();
+		if (!dev->open_count++)
+ retcode = drm_firstopen(dev);
+ DRM_UNLOCK();
+ }
+
+ return (retcode);
+}
+
+/*ARGSUSED*/
+int
+drm_close(drm_device_t *dev, int minor, int flag, int otyp,
+ cred_t *credp)
+{
+ drm_cminor_t *mp;
+ drm_file_t *fpriv;
+ int retcode = 0;
+
+ DRM_LOCK();
+ mp = drm_find_file_by_minor(dev, minor);
+ if (!mp) {
+ DRM_UNLOCK();
+ DRM_ERROR("drm_close: can't find authenticator");
+ return (EACCES);
+ }
+
+ fpriv = mp->fpriv;
+ ASSERT(fpriv);
+
+ if (--fpriv->refs != 0)
+ goto done;
+
+ if (dev->driver->preclose != NULL)
+ dev->driver->preclose(dev, fpriv);
+
+ /*
+ * Begin inline drm_release
+ */
+ DRM_DEBUG("drm_close :pid = %d , open_count = %d",
+ DRM_CURRENTPID, dev->open_count);
+
+ if (dev->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+ dev->lock.filp == fpriv) {
+ DRM_DEBUG("Process %d dead, freeing lock for context %d",
+ DRM_CURRENTPID,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ if (dev->driver->reclaim_buffers_locked != NULL)
+ dev->driver->reclaim_buffers_locked(dev, fpriv);
+ (void) drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ } else if (dev->driver->reclaim_buffers_locked != NULL &&
+ dev->lock.hw_lock != NULL) {
+ DRM_ERROR("drm_close: "
+ "retake lock not implemented yet");
+ }
+
+ if (dev->driver->use_dma) {
+ drm_reclaim_buffers(dev, fpriv);
+ }
+
+ if (dev->driver->use_gem == 1) {
+ drm_gem_release(dev, fpriv);
+ }
+
+ if (dev->driver->postclose != NULL) {
+ dev->driver->postclose(dev, fpriv);
+ }
+ TAILQ_REMOVE(&dev->files, fpriv, link);
+ drm_free(fpriv, sizeof (*fpriv), DRM_MEM_FILES);
+
+done:
+ atomic_inc_32(&dev->counts[_DRM_STAT_CLOSES]);
+
+ TAILQ_REMOVE(&dev->minordevs, mp, link);
+ drm_free(mp, sizeof (*mp), DRM_MEM_FILES);
+
+ if (--dev->open_count == 0) {
+ retcode = drm_lastclose(dev);
+ }
+ DRM_UNLOCK();
+
+ return (retcode);
+}
+
+int
+drm_attach(drm_device_t *dev)
+{
+ return (drm_load(dev));
+}
+
+int
+drm_detach(drm_device_t *dev)
+{
+ drm_unload(dev);
+ drm_fini_kstats(dev);
+ return (DDI_SUCCESS);
+}
+
+static int
+drm_get_businfo(drm_device_t *dev)
+{
+ dev->irq = pci_get_irq(dev);
+ if (dev->irq == -1) {
+ DRM_ERROR("drm_get_businfo: get irq error");
+ return (DDI_FAILURE);
+ }
+ /* XXX Fix domain number (alpha hoses) */
+ dev->pci_domain = 0;
+ if (pci_get_info(dev, &dev->pci_bus,
+ &dev->pci_slot, &dev->pci_func) != DDI_SUCCESS) {
+ DRM_ERROR("drm_get_businfo: get bus slot func error ");
+ return (DDI_FAILURE);
+ }
+ DRM_DEBUG("drm_get_businfo: pci bus: %d, pci slot :%d pci func %d",
+ dev->pci_bus, dev->pci_slot, dev->pci_func);
+ return (DDI_SUCCESS);
+}
+
+int
+drm_probe(drm_device_t *dev, drm_pci_id_list_t *idlist)
+{
+ const char *s = NULL;
+ int vendor, device;
+
+ vendor = pci_get_vendor(dev);
+ device = pci_get_device(dev);
+
+ s = drm_find_description(vendor, device, idlist);
+ if (s != NULL) {
+ dev->desc = s;
+ if (drm_get_businfo(dev) != DDI_SUCCESS) {
+ DRM_ERROR("drm_probe: drm get bus info error");
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+ }
+ return (DDI_FAILURE);
+}
diff --git a/usr/src/uts/common/io/drm/drm_fops.c b/usr/src/uts/common/io/drm/drm_fops.c
new file mode 100644
index 0000000..da61e4d
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_fops.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/* BEGIN CSTYLED */
+
+/* drm_fops.c -- File operations for DRM -*- linux-c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
+ */
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Daryll Strauss <daryll@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+/* END CSTYLED */
+
+#include "drmP.h"
+
+/*ARGSUSED*/
+drm_file_t *
+drm_find_file_by_proc(drm_device_t *dev, cred_t *credp)
+{
+ pid_t pid = ddi_get_pid();
+ drm_file_t *priv;
+
+ TAILQ_FOREACH(priv, &dev->files, link)
+ if (priv->pid == pid)
+ return (priv);
+ return (NULL);
+}
+
+
+drm_cminor_t *
+drm_find_file_by_minor(drm_device_t *dev, int minor)
+{
+ drm_cminor_t *mp;
+
+ TAILQ_FOREACH(mp, &dev->minordevs, link) {
+ if (mp->minor == minor)
+ return (mp);
+ }
+ return (NULL);
+}
+
+/* drm_open_helper is called whenever a process opens /dev/drm. */
+/*ARGSUSED*/
+int
+drm_open_helper(drm_device_t *dev, drm_cminor_t *mp, int flags,
+ int otyp, cred_t *credp)
+{
+ drm_file_t *priv;
+ pid_t pid;
+ int retcode;
+
+ if (flags & FEXCL)
+ return (EBUSY); /* No exclusive opens */
+ dev->flags = flags;
+
+ pid = ddi_get_pid();
+ DRM_DEBUG("drm_open_helper :pid = %d", pid);
+
+ DRM_LOCK();
+ priv = drm_find_file_by_proc(dev, credp);
+ if (priv) {
+ priv->refs++;
+ } else {
+ priv = drm_alloc(sizeof (*priv), DRM_MEM_FILES);
+ if (priv == NULL) {
+ DRM_UNLOCK();
+ return (ENOMEM);
+ }
+ bzero(priv, sizeof (*priv));
+
+ priv->uid = crgetsuid(credp);
+ priv->pid = pid;
+
+ priv->refs = 1;
+ priv->minor = 5; /* XXX placeholder, not a real minor */
+ priv->ioctl_count = 0;
+
+ /* for compatibility root is always authenticated */
+ priv->authenticated = DRM_SUSER(credp);
+
+ if (dev->driver->use_gem == 1)
+ drm_gem_open(priv);
+
+ if (dev->driver->open) {
+ retcode = dev->driver->open(dev, priv);
+ if (retcode != 0) {
+ drm_free(priv, sizeof (*priv), DRM_MEM_FILES);
+ DRM_UNLOCK();
+ return (retcode);
+ }
+ }
+
+ /* first opener automatically becomes master */
+ priv->master = TAILQ_EMPTY(&dev->files);
+ TAILQ_INSERT_TAIL(&dev->files, priv, link);
+ }
+ mp->fpriv = priv;
+ DRM_UNLOCK();
+ return (0);
+}
diff --git a/usr/src/uts/common/io/drm/drm_gem.c b/usr/src/uts/common/io/drm/drm_gem.c
new file mode 100644
index 0000000..69c5fc1
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_gem.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <vm/anon.h>
+#include <vm/seg_kmem.h>
+#include <vm/seg_kp.h>
+#include <vm/seg_map.h>
+#include <sys/fcntl.h>
+#include <sys/vnode.h>
+#include <sys/file.h>
+#include <sys/bitmap.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <gfx_private.h>
+#include "drmP.h"
+#include "drm.h"
+
+/*
+ * @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the CPU, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file. However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ * default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ * handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls. The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+void
+idr_list_init(struct idr_list *head)
+{
+ struct idr_list *entry;
+ /* HASH for accelerate */
+ entry = kmem_zalloc(DRM_GEM_OBJIDR_HASHNODE
+ * sizeof (struct idr_list), KM_SLEEP);
+ head->next = entry;
+ for (int i = 0; i < DRM_GEM_OBJIDR_HASHNODE; i++) {
+ INIT_LIST_HEAD(&entry[i]);
+ }
+}
+
+int
+idr_list_get_new_above(struct idr_list *head,
+ struct drm_gem_object *obj,
+ int *handlep)
+{
+ struct idr_list *entry;
+ int key;
+ entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
+ key = obj->name % DRM_GEM_OBJIDR_HASHNODE;
+ list_add(entry, &head->next[key], NULL);
+ entry->obj = obj;
+ entry->handle = obj->name;
+ *handlep = obj->name;
+ return (0);
+}
+
+struct drm_gem_object *
+idr_list_find(struct idr_list *head,
+ uint32_t name)
+{
+ struct idr_list *entry;
+ int key;
+ key = name % DRM_GEM_OBJIDR_HASHNODE;
+
+ list_for_each(entry, &head->next[key]) {
+ if (entry->handle == name)
+ return (entry->obj);
+ }
+ return (NULL);
+}
+
+int
+idr_list_remove(struct idr_list *head,
+ uint32_t name)
+{
+ struct idr_list *entry, *temp;
+ int key;
+ key = name % DRM_GEM_OBJIDR_HASHNODE;
+ list_for_each_safe(entry, temp, &head->next[key]) {
+ if (entry->handle == name) {
+ list_del(entry);
+ kmem_free(entry, sizeof (*entry));
+ return (0);
+ }
+ }
+ DRM_ERROR("Failed to remove the object %d", name);
+ return (-1);
+}
+
+void
+idr_list_free(struct idr_list *head)
+{
+ struct idr_list *entry, *temp;
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
+ list_for_each_safe(entry, temp, &head->next[key]) {
+ list_del(entry);
+ kmem_free(entry, sizeof (*entry));
+ }
+ }
+ kmem_free(head->next,
+ DRM_GEM_OBJIDR_HASHNODE * sizeof (struct idr_list));
+ head->next = NULL;
+}
+
+int
+idr_list_empty(struct idr_list *head)
+{
+ int empty;
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
+ empty = list_empty(&(head)->next[key]);
+ if (!empty)
+ return (empty);
+ }
+ return (1);
+}
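+
+/*
+ * Editorial sketch (not part of the original import): a minimal round
+ * trip through the idr_list API above, assuming an initialized "head"
+ * and an object whose "name" is already set; guarded so it is never
+ * compiled in.
+ */
+#ifdef DRM_EDITORIAL_EXAMPLE
+static void
+idr_list_example(struct idr_list *head, struct drm_gem_object *obj)
+{
+ struct drm_gem_object *found;
+ int handle;
+
+ (void) idr_list_get_new_above(head, obj, &handle);
+ found = idr_list_find(head, (uint32_t)handle);
+ ASSERT(found == obj);
+ (void) idr_list_remove(head, (uint32_t)handle);
+}
+#endif /* DRM_EDITORIAL_EXAMPLE */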
+
+static uint32_t shfile_name = 0;
+#define SHFILE_NAME_MAX 0xffffffff
+
+/*
+ * will be set to 1 for 32 bit x86 systems only, in startup.c
+ */
+extern int segkp_fromheap;
+extern ulong_t *segkp_bitmap;
+
+void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ atomic_inc(&obj->refcount);
+}
+
+void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ atomic_sub(1, &obj->refcount);
+ if (obj->refcount == 0)
+ drm_gem_object_free(obj);
+}
+
+void
+drm_gem_object_handle_reference(struct drm_gem_object *obj)
+{
+ drm_gem_object_reference(obj);
+ atomic_inc(&obj->handlecount);
+}
+
+void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+ atomic_sub(1, &obj->handlecount);
+ if (obj->handlecount == 0)
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_unreference(obj);
+}
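+
+/*
+ * Editorial note (not in the original import): each object carries two
+ * counters. "refcount" tracks plain references and "handlecount" tracks
+ * userland handles; a handle reference always implies a regular
+ * reference, so dropping the last handle first frees any flink name and
+ * then drops its paired regular reference, possibly freeing the object.
+ */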
+
+/*
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+ mutex_init(&dev->object_name_lock, NULL, MUTEX_DRIVER, NULL);
+ idr_list_init(&dev->object_name_idr);
+
+ atomic_set(&dev->object_count, 0);
+ atomic_set(&dev->object_memory, 0);
+ atomic_set(&dev->pin_count, 0);
+ atomic_set(&dev->pin_memory, 0);
+ atomic_set(&dev->gtt_count, 0);
+ atomic_set(&dev->gtt_memory, 0);
+ return (0);
+}
+
+/*
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+ static ddi_dma_attr_t dma_attr = {
+ DMA_ATTR_V0,
+ 0U, /* dma_attr_addr_lo */
+ 0xffffffffU, /* dma_attr_addr_hi */
+ 0xffffffffU, /* dma_attr_count_max */
+ 4096, /* dma_attr_align */
+ 0x1fffU, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffU, /* dma_attr_maxxfer */
+ 0xffffffffU, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen, variable */
+ 4, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+ };
+ static ddi_device_acc_attr_t acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_MERGING_OK_ACC
+ };
+ struct drm_gem_object *obj;
+ ddi_dma_cookie_t cookie;
+ uint_t cookie_cnt;
+ drm_local_map_t *map;
+
+ pgcnt_t real_pgcnt, pgcnt = btopr(size);
+ uint32_t paddr, cookie_end;
+ int i, n;
+
+ obj = kmem_zalloc(sizeof (struct drm_gem_object), KM_NOSLEEP);
+ if (obj == NULL)
+ return (NULL);
+
+ obj->dev = dev;
+ obj->flink = 0;
+ obj->size = size;
+
+ if (shfile_name == SHFILE_NAME_MAX) {
+ DRM_ERROR("No name space for object");
+ goto err1;
+ } else {
+ obj->name = ++shfile_name;
+ }
+
+ dma_attr.dma_attr_sgllen = (int)pgcnt;
+
+ if (ddi_dma_alloc_handle(dev->dip, &dma_attr,
+ DDI_DMA_DONTWAIT, NULL, &obj->dma_hdl)) {
+ DRM_ERROR("drm_gem_object_alloc: "
+ "ddi_dma_alloc_handle failed");
+ goto err1;
+ }
+ if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), &acc_attr,
+ IOMEM_DATA_UC_WR_COMBINE, DDI_DMA_DONTWAIT, NULL,
+ &obj->kaddr, &obj->real_size, &obj->acc_hdl)) {
+ DRM_ERROR("drm_gem_object_alloc: "
+ "ddi_dma_mem_alloc failed");
+ goto err2;
+ }
+ if (ddi_dma_addr_bind_handle(obj->dma_hdl, NULL,
+ obj->kaddr, obj->real_size, DDI_DMA_RDWR,
+ DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_cnt)
+ != DDI_DMA_MAPPED) {
+ DRM_ERROR("drm_gem_object_alloc: "
+ "ddi_dma_addr_bind_handle failed");
+ goto err3;
+ }
+
+ real_pgcnt = btopr(obj->real_size);
+
+ obj->pfnarray = kmem_zalloc(real_pgcnt * sizeof (pfn_t), KM_NOSLEEP);
+ if (obj->pfnarray == NULL) {
+ goto err4;
+ }
+ for (n = 0, i = 1; ; i++) {
+ for (paddr = cookie.dmac_address,
+ cookie_end = cookie.dmac_address + cookie.dmac_size;
+ paddr < cookie_end;
+ paddr += PAGESIZE) {
+ obj->pfnarray[n++] = btop(paddr);
+ if (n >= real_pgcnt)
+ goto addmap;
+ }
+ if (i >= cookie_cnt)
+ break;
+ ddi_dma_nextcookie(obj->dma_hdl, &cookie);
+ }
+
+addmap:
+ map = drm_alloc(sizeof (struct drm_local_map), DRM_MEM_MAPS);
+ if (map == NULL) {
+ goto err5;
+ }
+
+ map->handle = obj;
+ map->offset = (uintptr_t)map->handle;
+ map->offset &= 0xffffffffUL;
+ map->dev_addr = map->handle;
+ map->size = obj->real_size;
+ map->type = _DRM_TTM;
+ map->flags = _DRM_WRITE_COMBINING | _DRM_REMOVABLE;
+ map->drm_umem_cookie =
+ gfxp_umem_cookie_init(obj->kaddr, obj->real_size);
+ if (map->drm_umem_cookie == NULL) {
+ goto err6;
+ }
+
+ obj->map = map;
+
+ atomic_set(&obj->refcount, 1);
+ atomic_set(&obj->handlecount, 1);
+ if (dev->driver->gem_init_object != NULL &&
+ dev->driver->gem_init_object(obj) != 0) {
+ goto err7;
+ }
+ atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
+
+ return (obj);
+
+err7:
+ gfxp_umem_cookie_destroy(map->drm_umem_cookie);
+err6:
+ drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
+err5:
+ kmem_free(obj->pfnarray, real_pgcnt * sizeof (pfn_t));
+err4:
+ (void) ddi_dma_unbind_handle(obj->dma_hdl);
+err3:
+ ddi_dma_mem_free(&obj->acc_hdl);
+err2:
+ ddi_dma_free_handle(&obj->dma_hdl);
+err1:
+ kmem_free(obj, sizeof (struct drm_gem_object));
+
+ return (NULL);
+}
+
+/*
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ int err;
+ /*
+ * This is gross. The idr system doesn't let us try a delete and
+ * return an error code. It just spews if you fail at deleting.
+ * So, we have to grab a lock around finding the object and then
+ * doing the delete on it and dropping the refcount, or the user
+ * could race us to double-decrement the refcount and cause a
+ * use-after-free later. Given the frequency of our handle lookups,
+ * we may want to use ida for number allocation and a hash table
+ * for the pointers, anyway.
+ */
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_list_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ DRM_ERROR("obj %d is not in tne list, failed to close", handle);
+ return (EINVAL);
+ }
+ dev = obj->dev;
+
+ /* Release reference and decrement refcount. */
+ err = idr_list_remove(&filp->object_idr, handle);
+ if (err == -1)
+ DRM_ERROR("%s", __func__);
+
+ spin_unlock(&filp->table_lock);
+
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return (0);
+}
+
+/*
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep)
+{
+ int ret;
+
+ /*
+ * Get the user-visible handle using idr.
+ */
+again:
+ /* ensure there is space available to allocate a handle */
+
+ /* do the allocation under our spinlock */
+ spin_lock(&file_priv->table_lock);
+ ret = idr_list_get_new_above(&file_priv->object_idr, obj, handlep);
+ spin_unlock(&file_priv->table_lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0) {
+ DRM_ERROR("Failed to create handle");
+ return (ret);
+ }
+
+ drm_gem_object_handle_reference(obj);
+ return (0);
+}
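+
+/*
+ * Editorial sketch (not part of the original import): a driver "create"
+ * ioctl would typically combine drm_gem_object_alloc() with the handle
+ * creation above, then drop the allocation reference since the handle
+ * now holds one; guarded so it is never compiled in.
+ */
+#ifdef DRM_EDITORIAL_EXAMPLE
+static int
+gem_create_example(struct drm_device *dev, struct drm_file *fpriv,
+ size_t size, int *handlep)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_alloc(dev, size);
+ if (obj == NULL)
+ return (ENOMEM);
+
+ ret = drm_gem_handle_create(fpriv, obj, handlep);
+
+ /* drop the allocation reference; the handle keeps its own */
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return (ret);
+}
+#endif /* DRM_EDITORIAL_EXAMPLE */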
+
+/* Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *filp,
+ int handle)
+{
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_list_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ DRM_ERROR("object_lookup failed, handle %d", handle);
+ return (NULL);
+ }
+
+ drm_gem_object_reference(obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return (obj);
+}
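+
+/*
+ * Editorial sketch (not part of the original import): every successful
+ * lookup returns a reference the caller must eventually drop under
+ * struct_mutex, as the ioctls below do; guarded example:
+ */
+#ifdef DRM_EDITORIAL_EXAMPLE
+static int
+gem_lookup_example(struct drm_device *dev, struct drm_file *filp, int handle)
+{
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(filp, handle);
+ if (obj == NULL)
+ return (EINVAL);
+ /* ... operate on obj ... */
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return (0);
+}
+#endif /* DRM_EDITORIAL_EXAMPLE */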
+
+/*
+ * Releases the handle to an mm object.
+ */
+/*ARGSUSED*/
+int
+drm_gem_close_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_gem_close args;
+ int ret;
+
+ if (!(dev->driver->use_gem == 1))
+ return (ENODEV);
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (void *)data, sizeof (args));
+
+ ret = drm_gem_handle_delete(fpriv, args.handle);
+
+ return (ret);
+}
+
+/*
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+/*ARGSUSED*/
+int
+drm_gem_flink_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_gem_flink args;
+ struct drm_gem_object *obj;
+ int ret, handle;
+
+ if (!(dev->driver->use_gem == 1))
+ return (ENODEV);
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (void *)data, sizeof (args));
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL)
+ return (EINVAL);
+ handle = args.handle;
+ spin_lock(&dev->object_name_lock);
+ if (!obj->flink) {
+ /* only create a node in object_name_idr; don't update anything else */
+ ret = idr_list_get_new_above(&dev->object_name_idr,
+ obj, &handle);
+ obj->flink = obj->name;
+ /* Allocate a reference for the name table. */
+ drm_gem_object_reference(obj);
+ }
+ /*
+ * Leave the reference from the lookup around as the
+ * name table now holds one
+ */
+ args.name = obj->name;
+
+ spin_unlock(&dev->object_name_lock);
+ ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
+ if (ret != 0)
+ DRM_ERROR(" gem flink error! %d", ret);
+
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+
+ return (ret);
+}
+
+/*
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+/*ARGSUSED*/
+int
+drm_gem_open_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_gem_open args;
+ struct drm_gem_object *obj;
+ int ret;
+ int handle;
+
+ if (!(dev->driver->use_gem == 1)) {
+ DRM_ERROR("Not support GEM");
+ return (ENODEV);
+ }
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (void *) data, sizeof (args));
+
+ spin_lock(&dev->object_name_lock);
+
+ obj = idr_list_find(&dev->object_name_idr, args.name);
+
+ if (obj)
+ drm_gem_object_reference(obj);
+ spin_unlock(&dev->object_name_lock);
+ if (!obj) {
+ DRM_ERROR("Can't find the obj %d", args.name);
+ return (ENOENT);
+ }
+
+ ret = drm_gem_handle_create(fpriv, obj, &handle);
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+
+ args.handle = handle;
+ args.size = obj->size;
+
+ ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
+ if (ret != 0)
+ DRM_ERROR(" gem open error! %d", ret);
+ return (ret);
+}
+
+/*
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_file *file_private)
+{
+ idr_list_init(&file_private->object_idr);
+ mutex_init(&file_private->table_lock, NULL, MUTEX_DRIVER, NULL);
+}
+
+/*
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static void
+drm_gem_object_release_handle(struct drm_gem_object *obj)
+{
+ drm_gem_object_handle_unreference(obj);
+}
+
+/*
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+ struct idr_list *entry;
+ spin_lock(&dev->struct_mutex);
+
+ idr_list_for_each(entry, &file_private->object_idr)
+ drm_gem_object_release_handle(entry->obj);
+
+ idr_list_free(&file_private->object_idr);
+ spin_unlock(&dev->struct_mutex);
+}
+
+/*
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_local_map *map = obj->map;
+
+ if (dev->driver->gem_free_object != NULL)
+ dev->driver->gem_free_object(obj);
+
+ gfxp_umem_cookie_destroy(map->drm_umem_cookie);
+ drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
+
+ kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));
+
+ (void) ddi_dma_unbind_handle(obj->dma_hdl);
+ ddi_dma_mem_free(&obj->acc_hdl);
+ ddi_dma_free_handle(&obj->dma_hdl);
+
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
+ kmem_free(obj, sizeof (struct drm_gem_object));
+}
+
+/*
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ int err;
+ struct drm_device *dev = obj->dev;
+ /* Remove any name for this object */
+ spin_lock(&dev->object_name_lock);
+ if (obj->flink) {
+ err = idr_list_remove(&dev->object_name_idr, obj->name);
+ if (err == -1)
+ DRM_ERROR("%s", __func__);
+ obj->flink = 0;
+ spin_unlock(&dev->object_name_lock);
+ /*
+ * The object name held a reference to this object, drop
+ * that now.
+ */
+ drm_gem_object_unreference(obj);
+ } else {
+ spin_unlock(&dev->object_name_lock);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_io32.h b/usr/src/uts/common/io/drm/drm_io32.h
new file mode 100644
index 0000000..e710697
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_io32.h
@@ -0,0 +1,187 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _DRM_IO32_H_
+#define _DRM_IO32_H_
+
+#ifdef _MULTI_DATAMODEL
+
+typedef struct drm_version_32 {
+ int version_major; /* Major version */
+ int version_minor; /* Minor version */
+ int version_patchlevel; /* Patch level */
+ uint32_t name_len; /* Length of name buffer */
+ caddr32_t name; /* Name of driver */
+ uint32_t date_len; /* Length of date buffer */
+ caddr32_t date; /* User-space buffer to hold date */
+ uint32_t desc_len; /* Length of desc buffer */
+ caddr32_t desc; /* User-space buffer to hold desc */
+} drm_version_32_t;
+
+typedef struct drm_unique_32 {
+ uint32_t unique_len; /* Length of unique */
+ caddr32_t unique; /* Unique name for driver instantiation */
+} drm_unique_32_t;
+
+typedef struct drm_ctx_priv_map_32 {
+ unsigned int ctx_id; /* Context requesting private mapping */
+ caddr32_t handle; /* Handle of map */
+} drm_ctx_priv_map_32_t;
+
+typedef struct drm_map_32 {
+ unsigned long long offset;
+ unsigned long long handle;
+ uint32_t size;
+ drm_map_type_t type;
+ drm_map_flags_t flags;
+ int mtrr;
+} drm_map_32_t;
+
+
+typedef struct drm_client_32 {
+ int idx; /* Which client desired? */
+ int auth; /* Is client authenticated? */
+ uint32_t pid; /* Process ID */
+ uint32_t uid; /* User ID */
+ uint32_t magic; /* Magic */
+ uint32_t iocs; /* Ioctl count */
+} drm_client_32_t;
+
+
+typedef struct drm_stats_32 {
+ uint32_t count;
+ struct {
+ uint32_t value;
+ drm_stat_type_t type;
+ } data[15];
+} drm_stats_32_t;
+
+
+typedef struct drm_buf_desc_32 {
+ int count; /* Number of buffers of this size */
+ int size; /* Size in bytes */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+ drm_buf_flag flags;
+
+ /*
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+ uint32_t agp_start;
+
+} drm_buf_desc_32_t;
+
+typedef struct drm_buf_free_32 {
+ int count;
+ uint32_t list;
+} drm_buf_free_32_t;
+
+/*
+ * Used by DRM_IOCTL_MAP_BUFS_32
+ */
+typedef struct drm_buf_pub_32 {
+ int idx; /* Index into the master buffer list */
+ int total; /* Buffer size */
+ int used; /* Amount of buffer in use (for DMA) */
+ uint32_t address; /* Address of buffer */
+} drm_buf_pub_32_t;
+
+typedef struct drm_buf_map_32 {
+ int count; /* Length of the buffer list */
+#if defined(__cplusplus)
+ uint32_t c_virtual;
+#else
+ uint32_t virtual; /* Mmap'd area in user-virtual */
+#endif
+ uint32_t list; /* Buffer information */
+ int fd;
+} drm_buf_map_32_t;
+
+typedef struct drm_agp_mode_32 {
+ uint32_t mode; /* AGP mode */
+} drm_agp_mode_32_t;
+
+typedef struct drm_agp_buffer32 {
+ uint32_t size; /* In bytes -- will round to page boundary */
+ uint32_t handle; /* Used for binding / unbinding */
+ uint32_t type; /* Type of memory to allocate */
+ uint32_t physical; /* Physical used by i810 */
+} drm_agp_buffer_32_t;
+
+typedef struct drm_agp_binding_32 {
+ uint32_t handle; /* From drm_agp_buffer */
+ uint32_t offset; /* In bytes -- will round to page boundary */
+} drm_agp_binding_32_t;
+
+typedef struct drm_agp_info_32 {
+ int agp_version_major;
+ int agp_version_minor;
+ uint32_t mode;
+ uint32_t aperture_base;
+ uint32_t aperture_size;
+ uint32_t memory_allowed;
+ uint32_t memory_used;
+ unsigned short id_vendor;
+ unsigned short id_device;
+} drm_agp_info_32_t;
+
+typedef struct drm_scatter_gather_32 {
+ uint32_t size; /* In bytes -- will round to page boundary */
+ uint32_t handle; /* Used for mapping/unmapping */
+} drm_scatter_gather_32_t;
+
+typedef struct drm_ctx_res_32 {
+ int count;
+ caddr32_t contexts;
+} drm_ctx_res_32_t;
+
+struct drm_wait_vblank_request_32 {
+ drm_vblank_seq_type_t type;
+ uint32_t sequence;
+ uint32_t signal;
+};
+struct drm_wait_vblank_reply_32 {
+ drm_vblank_seq_type_t type;
+ uint32_t sequence;
+ int32_t tval_sec;
+ int32_t tval_usec;
+};
+
+/*
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+typedef union drm_wait_vblank_32 {
+ struct drm_wait_vblank_request_32 request;
+ struct drm_wait_vblank_reply_32 reply;
+} drm_wait_vblank_32_t;
+
+
+#endif /* _MULTI_DATAMODEL */
+
+#endif /* _DRM_IO32_H_ */
diff --git a/usr/src/uts/common/io/drm/drm_ioctl.c b/usr/src/uts/common/io/drm/drm_ioctl.c
new file mode 100644
index 0000000..8d504a1
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_ioctl.c
@@ -0,0 +1,424 @@
+/*
+ * drm_ioctl.c -- IOCTL processing for DRM -*- linux-c -*-
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+#include "drm_io32.h"
+
+/*
+ * Beginning in revision 1.1 of the DRM interface, getunique will return
+ * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
+ * before setunique has been called. The format for the bus-specific part of
+ * the unique is not defined for any other bus.
+ */
+/*ARGSUSED*/
+int
+drm_getunique(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_unique_t u1;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_unique_32_t u32;
+
+ DRM_COPYFROM_WITH_RETURN(&u32, (void *)data, sizeof (u32));
+ u1.unique_len = u32.unique_len;
+ u1.unique = (char __user *)(uintptr_t)u32.unique;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&u1, (void *)data, sizeof (u1));
+
+ if (u1.unique_len >= dev->unique_len) {
+ if (dev->unique_len == 0) {
+ DRM_ERROR("drm_getunique: dev->unique_len = 0");
+ return (EFAULT);
+ }
+ if (DRM_COPY_TO_USER(u1.unique, dev->unique, dev->unique_len))
+ return (EFAULT);
+ }
+ u1.unique_len = dev->unique_len;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_unique_32_t u32;
+
+ u32.unique_len = (uint32_t)u1.unique_len;
+ u32.unique = (caddr32_t)(uintptr_t)u1.unique;
+ DRM_COPYTO_WITH_RETURN((void *)data, &u32, sizeof (u32));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data, &u1, sizeof (u1));
+
+ return (0);
+}
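+
+/*
+ * Editorial note (not in the original import): the #ifdef
+ * _MULTI_DATAMODEL blocks above show the pattern used throughout this
+ * file -- a 32-bit userland structure is converted on the way in and
+ * out of a single native-sized implementation, keyed off
+ * ddi_model_convert_from(9F).
+ */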
+
+/*
+ * Deprecated in DRM version 1.1, and will return EBUSY when setversion has
+ * requested version 1.1 or greater.
+ */
+/*ARGSUSED*/
+int
+drm_setunique(DRM_IOCTL_ARGS)
+{
+ return (EINVAL);
+}
+
+
+static int
+drm_set_busid(drm_device_t *dev)
+{
+ DRM_LOCK();
+
+ if (dev->unique != NULL) {
+ DRM_UNLOCK();
+ return (EBUSY);
+ }
+
+ dev->unique_len = 20;
+ dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
+ if (dev->unique == NULL) {
+ DRM_UNLOCK();
+ return (ENOMEM);
+ }
+
+ (void) snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%1x",
+ dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
+
+ DRM_UNLOCK();
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_getmap(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_map_t map;
+ drm_local_map_t *mapinlist;
+ int idx;
+ int i = 0;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_map_32_t map32;
+
+ DRM_COPYFROM_WITH_RETURN(&map32, (void *)data, sizeof (map32));
+ map.offset = map32.offset;
+ map.size = map32.size;
+ map.type = map32.type;
+ map.flags = map32.flags;
+ map.handle = map32.handle;
+ map.mtrr = map32.mtrr;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&map, (void *)data, sizeof (map));
+
+ idx = (int)map.offset;
+
+ DRM_LOCK();
+ if (idx < 0) {
+ DRM_UNLOCK();
+ return (EINVAL);
+ }
+
+ TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
+ if (i == idx) {
+ map.offset = mapinlist->offset;
+ map.size = mapinlist->size;
+ map.type = mapinlist->type;
+ map.flags = mapinlist->flags;
+ map.handle = (unsigned long long)(uintptr_t)
+ mapinlist->handle;
+ map.mtrr = mapinlist->mtrr;
+ break;
+ }
+ i++;
+ }
+
+ DRM_UNLOCK();
+
+ if (mapinlist == NULL)
+ return (EINVAL);
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_map_32_t map32;
+
+ map32.offset = map.offset;
+ map32.size = (uint32_t)map.size;
+ map32.type = map.type;
+ map32.flags = map.flags;
+ map32.handle = (uintptr_t)map.handle;
+ map32.mtrr = map.mtrr;
+ DRM_COPYTO_WITH_RETURN((void *)data, &map32, sizeof (map32));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data, &map, sizeof (map));
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_getclient(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_client_t client;
+ drm_file_t *pt;
+ int idx;
+ int i = 0;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_client_32_t client32;
+
+ DRM_COPYFROM_WITH_RETURN(&client32, (void *)data,
+ sizeof (client32));
+ client.idx = client32.idx;
+ client.auth = client32.auth;
+ client.pid = client32.pid;
+ client.uid = client32.uid;
+ client.magic = client32.magic;
+ client.iocs = client32.iocs;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&client, (void *)data,
+ sizeof (client));
+
+ idx = client.idx;
+ DRM_LOCK();
+ TAILQ_FOREACH(pt, &dev->files, link) {
+ if (i == idx) {
+ client.auth = pt->authenticated;
+ client.pid = pt->pid;
+ client.uid = pt->uid;
+ client.magic = pt->magic;
+ client.iocs = pt->ioctl_count;
+ DRM_UNLOCK();
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) ==
+ DDI_MODEL_ILP32) {
+ drm_client_32_t client32;
+
+ client32.idx = client.idx;
+ client32.auth = client.auth;
+ client32.pid = (uint32_t)client.pid;
+ client32.uid = (uint32_t)client.uid;
+ client32.magic = (uint32_t)client.magic;
+ client32.iocs = (uint32_t)client.iocs;
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &client32,
+ sizeof (client32));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data,
+ &client, sizeof (client));
+
+ return (0);
+ }
+ i++;
+ }
+ DRM_UNLOCK();
+ return (EINVAL);
+}
+
+/*ARGSUSED*/
+int
+drm_getstats(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_stats_t stats;
+ int i;
+
+ bzero(&stats, sizeof (stats));
+
+ DRM_LOCK();
+
+ for (i = 0; i < dev->counters; i++) {
+ if (dev->types[i] == _DRM_STAT_LOCK) {
+ stats.data[i].value
+ = (dev->lock.hw_lock
+ ? dev->lock.hw_lock->lock : 0);
+ } else
+ stats.data[i].value = atomic_read(&dev->counts[i]);
+ stats.data[i].type = dev->types[i];
+ }
+
+ stats.count = dev->counters;
+
+ DRM_UNLOCK();
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_stats_32_t stats32;
+ stats32.count = (uint32_t)stats.count;
+ for (i = 0; i < 15; i++) {
+ stats32.data[i].value = stats.data[i].value;
+ stats32.data[i].type = stats.data[i].type;
+ }
+ DRM_COPYTO_WITH_RETURN((void *)data, &stats32,
+ sizeof (stats32));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data, &stats, sizeof (stats));
+
+ return (0);
+}
+
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 2
+
+/*ARGSUSED*/
+int
+drm_setversion(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_set_version_t sv;
+ drm_set_version_t retv;
+ int if_version;
+
+ DRM_COPYFROM_WITH_RETURN(&sv, (void *)data, sizeof (sv));
+
+ retv.drm_di_major = DRM_IF_MAJOR;
+ retv.drm_di_minor = DRM_IF_MINOR;
+ retv.drm_dd_major = dev->driver->driver_major;
+ retv.drm_dd_minor = dev->driver->driver_minor;
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &retv, sizeof (sv));
+
+ if (sv.drm_di_major != -1) {
+ if (sv.drm_di_major != DRM_IF_MAJOR ||
+ sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
+ return (EINVAL);
+ if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
+ dev->if_version = DRM_MAX(if_version, dev->if_version);
+ if (sv.drm_di_minor >= 1) {
+ /*
+ * Version 1.1 includes tying of DRM to specific device
+ */
+ (void) drm_set_busid(dev);
+ }
+ }
+
+ if (sv.drm_dd_major != -1) {
+ if (sv.drm_dd_major != dev->driver->driver_major ||
+ sv.drm_dd_minor < 0 ||
+ sv.drm_dd_minor > dev->driver->driver_minor)
+ return (EINVAL);
+ }
+ return (0);
+}
+
+
+/*ARGSUSED*/
+int
+drm_noop(DRM_IOCTL_ARGS)
+{
+ DRM_DEBUG("drm_noop\n");
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_version(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_version_t version;
+ size_t len;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_version_32_t version32;
+
+ DRM_COPYFROM_WITH_RETURN(&version32,
+ (void *)data, sizeof (drm_version_32_t));
+ version.name_len = version32.name_len;
+ version.name = (char *)(uintptr_t)version32.name;
+ version.date_len = version32.date_len;
+ version.date = (char *)(uintptr_t)version32.date;
+ version.desc_len = version32.desc_len;
+ version.desc = (char *)(uintptr_t)version32.desc;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&version, (void *)data,
+ sizeof (version));
+
+#define DRM_COPY(name, value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ if (DRM_COPY_TO_USER(name, value, len)) \
+ return (EFAULT); \
+ }
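+
+ /*
+ * Editorial note (not in the original): DRM_COPY copies out at most
+ * the buffer length userland supplied, but always reports the full
+ * string length back, so the caller can retry with a larger buffer.
+ */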
+
+ version.version_major = dev->driver->driver_major;
+ version.version_minor = dev->driver->driver_minor;
+ version.version_patchlevel = dev->driver->driver_patchlevel;
+
+ DRM_COPY(version.name, dev->driver->driver_name);
+ DRM_COPY(version.date, dev->driver->driver_date);
+ DRM_COPY(version.desc, dev->driver->driver_desc);
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_version_32_t version32;
+
+ version32.version_major = version.version_major;
+ version32.version_minor = version.version_minor;
+ version32.version_patchlevel = version.version_patchlevel;
+ version32.name_len = (uint32_t)version.name_len;
+ version32.name = (caddr32_t)(uintptr_t)version.name;
+ version32.date_len = (uint32_t)version.date_len;
+ version32.date = (caddr32_t)(uintptr_t)version.date;
+ version32.desc_len = (uint32_t)version.desc_len;
+ version32.desc = (caddr32_t)(uintptr_t)version.desc;
+ DRM_COPYTO_WITH_RETURN((void *)data, &version32,
+ sizeof (drm_version_32_t));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data, &version,
+ sizeof (version));
+
+ return (0);
+}
diff --git a/usr/src/uts/common/io/drm/drm_irq.c b/usr/src/uts/common/io/drm/drm_irq.c
new file mode 100644
index 0000000..3d3640a
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_irq.c
@@ -0,0 +1,581 @@
+/*
+ * drm_irq.c -- IRQ IOCTL and function support
+ * Created: Fri Oct 18 2003 by anholt@FreeBSD.org
+ */
+/*
+ * Copyright 2003 Eric Anholt
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_io32.h"
+
+/*ARGSUSED*/
+int
+drm_irq_by_busid(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_irq_busid_t irq;
+
+ DRM_COPYFROM_WITH_RETURN(&irq, (void *)data, sizeof (irq));
+
+ if ((irq.busnum >> 8) != dev->pci_domain ||
+ (irq.busnum & 0xff) != dev->pci_bus ||
+ irq.devnum != dev->pci_slot ||
+ irq.funcnum != dev->pci_func)
+ return (EINVAL);
+
+ irq.irq = dev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n",
+ irq.busnum, irq.devnum, irq.funcnum, irq.irq);
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &irq, sizeof (irq));
+
+ return (0);
+}
+
+
+static irqreturn_t
+drm_irq_handler_wrap(DRM_IRQ_ARGS)
+{
+ drm_device_t *dev = (void *)arg;
+ int ret;
+
+ mutex_enter(&dev->irq_lock);
+ ret = dev->driver->irq_handler(arg);
+ mutex_exit(&dev->irq_lock);
+
+ return (ret);
+}
+
+static void
+vblank_disable_fn(void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ int i;
+
+ if (!dev->vblank_disable_allowed)
+ return;
+
+ for (i = 0; i < dev->num_crtcs; i++) {
+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+ atomic_read(&dev->vblank_enabled[i]) == 1) {
+ dev->last_vblank[i] =
+ dev->driver->get_vblank_counter(dev, i);
+ dev->driver->disable_vblank(dev, i);
+ atomic_set(&dev->vblank_enabled[i], 0);
+ DRM_DEBUG("disable vblank");
+ }
+ }
+}
+
+void
+drm_vblank_cleanup(struct drm_device *dev)
+{
+
+ /* Bail if the driver didn't call drm_vblank_init() */
+ if (dev->num_crtcs == 0)
+ return;
+
+ vblank_disable_fn((void *)dev);
+
+ drm_free(dev->vbl_queues, sizeof (wait_queue_head_t) * dev->num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->vbl_sigs, sizeof (struct drm_vbl_sig) * dev->num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->_vblank_count, sizeof (atomic_t) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->vblank_refcount, sizeof (atomic_t) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->vblank_enabled, sizeof (int) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->last_vblank, sizeof (u32) * dev->num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->vblank_inmodeset, sizeof (*dev->vblank_inmodeset) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
+ dev->num_crtcs = 0;
+}
+
+int
+drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+ int i, ret = ENOMEM;
+
+ atomic_set(&dev->vbl_signal_pending, 0);
+ dev->num_crtcs = num_crtcs;
+
+
+ dev->vbl_queues = drm_alloc(sizeof (wait_queue_head_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vbl_queues)
+ goto err;
+
+ dev->vbl_sigs = drm_alloc(sizeof (struct drm_vbl_sig) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vbl_sigs)
+ goto err;
+
+ dev->_vblank_count = drm_alloc(sizeof (atomic_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->_vblank_count)
+ goto err;
+
+ dev->vblank_refcount = drm_alloc(sizeof (atomic_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_refcount)
+ goto err;
+
+ dev->vblank_enabled = drm_alloc(num_crtcs * sizeof (int),
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_enabled)
+ goto err;
+
+ dev->last_vblank = drm_alloc(num_crtcs * sizeof (u32), DRM_MEM_DRIVER);
+ if (!dev->last_vblank)
+ goto err;
+
+ dev->vblank_inmodeset = drm_alloc(num_crtcs * sizeof (int),
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_inmodeset)
+ goto err;
+
+ /* Zero per-crtc vblank stuff */
+ for (i = 0; i < num_crtcs; i++) {
+ DRM_INIT_WAITQUEUE(&dev->vbl_queues[i], DRM_INTR_PRI(dev));
+ TAILQ_INIT(&dev->vbl_sigs[i]);
+ atomic_set(&dev->_vblank_count[i], 0);
+ atomic_set(&dev->vblank_refcount[i], 0);
+ }
+
+ dev->vblank_disable_allowed = 1;
+ return (0);
+
+err:
+ DRM_ERROR("drm_vblank_init: alloc error");
+ drm_vblank_cleanup(dev);
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+drm_install_irq_handle(drm_device_t *dev)
+{
+ dev_info_t *dip = dev->dip;
+
+ if (dip == NULL) {
+ DRM_ERROR("drm_install_irq_handle: cannot get vgatext's dip");
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_intr_hilevel(dip, 0) != 0) {
+ DRM_ERROR("drm_install_irq_handle: "
+ "high-level interrupts are not supported");
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_get_iblock_cookie(dip, (uint_t)0,
+ &dev->intr_block) != DDI_SUCCESS) {
+ DRM_ERROR("drm_install_irq_handle: cannot get iblock cookie");
+ return (DDI_FAILURE);
+ }
+
+ /* setup the interrupt handler */
+ if (ddi_add_intr(dip, 0, &dev->intr_block,
+ (ddi_idevice_cookie_t *)NULL, drm_irq_handler_wrap,
+ (caddr_t)dev) != DDI_SUCCESS) {
+ DRM_ERROR("drm_install_irq_handle: ddi_add_intr failed");
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+drm_irq_install(drm_device_t *dev)
+{
+ int ret;
+
+ if (dev->dev_private == NULL) {
+ DRM_ERROR("drm_irq_install: dev_private is NULL");
+ return (EINVAL);
+ }
+
+ if (dev->irq_enabled) {
+ DRM_ERROR("drm_irq_install: irq already enabled");
+ return (EBUSY);
+ }
+
+ DRM_DEBUG("drm_irq_install irq=%d\n", dev->irq);
+
+ /* before installing handler */
+ ret = dev->driver->irq_preinstall(dev);
+ if (ret)
+ return (EINVAL);
+
+ /* install handler */
+ ret = drm_install_irq_handle(dev);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("drm_irq_install: drm_install_irq_handle failed");
+ return (ret);
+ }
+
+ /* after installing handler */
+ dev->driver->irq_postinstall(dev);
+
+ dev->irq_enabled = 1;
+ dev->context_flag = 0;
+
+ return (0);
+}
+
+static void
+drm_uninstall_irq_handle(drm_device_t *dev)
+{
+ ASSERT(dev->dip);
+ ddi_remove_intr(dev->dip, 0, dev->intr_block);
+}
+
+
+/*ARGSUSED*/
+int
+drm_irq_uninstall(drm_device_t *dev)
+{
+ int i;
+ if (!dev->irq_enabled) {
+ return (EINVAL);
+ }
+ dev->irq_enabled = 0;
+
+ /*
+ * Wake up any waiters so they don't hang.
+ */
+ DRM_SPINLOCK(&dev->vbl_lock);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ DRM_WAKEUP(&dev->vbl_queues[i]);
+ dev->vblank_enabled[i] = 0;
+ }
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+
+ dev->driver->irq_uninstall(dev);
+ drm_uninstall_irq_handle(dev);
+ dev->locked_tasklet_func = NULL;
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+drm_control(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_control_t ctl;
+ int err;
+
+ DRM_COPYFROM_WITH_RETURN(&ctl, (void *)data, sizeof (ctl));
+
+ switch (ctl.func) {
+ case DRM_INST_HANDLER:
+ /*
+ * Handle drivers whose DRM used to require IRQ setup but
+ * no longer does.
+ */
+ return (drm_irq_install(dev));
+ case DRM_UNINST_HANDLER:
+ err = drm_irq_uninstall(dev);
+ return (err);
+ default:
+ return (EINVAL);
+ }
+}
+
+u32
+drm_vblank_count(struct drm_device *dev, int crtc)
+{
+ return (atomic_read(&dev->_vblank_count[crtc]));
+}
+
+static void
+drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+ u32 cur_vblank, diff;
+ /*
+ * Interrupts were disabled prior to this call, so deal with counter
+ * wrap if needed.
+ * NOTE! It's possible we lost a full dev->max_vblank_count worth of
+ * events here if the register is small or we had vblank interrupts
+ * off for a long time.
+ */
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ diff = cur_vblank - dev->last_vblank[crtc];
+ if (cur_vblank < dev->last_vblank[crtc]) {
+ diff += dev->max_vblank_count;
+ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+ crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ }
+
+ atomic_add(diff, &dev->_vblank_count[crtc]);
+}
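+
+/*
+ * Editorial example (not in the original): with max_vblank_count = 0x100,
+ * last_vblank = 0x10 and cur_vblank = 0x05, the u32 subtraction above
+ * yields 0xfffffff5; adding max_vblank_count corrects it to 0xf5
+ * (245 vblanks elapsed, modulo the counter width).
+ */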
+
+static timeout_id_t timer_id = NULL;
+
+int
+drm_vblank_get(struct drm_device *dev, int crtc)
+{
+ int ret = 0;
+
+ DRM_SPINLOCK(&dev->vbl_lock);
+
+ if (timer_id != NULL) {
+ (void) untimeout(timer_id);
+ timer_id = NULL;
+ }
+
+ /* Going from 0->1 means we have to enable interrupts again */
+ atomic_add(1, &dev->vblank_refcount[crtc]);
+ if (dev->vblank_refcount[crtc] == 1 &&
+ atomic_read(&dev->vblank_enabled[crtc]) == 0) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else {
+ atomic_set(&dev->vblank_enabled[crtc], 1);
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+
+ return (ret);
+}
+
+void
+drm_vblank_put(struct drm_device *dev, int crtc)
+{
+ DRM_SPINLOCK(&dev->vbl_lock);
+ /* Last user schedules interrupt disable */
+ atomic_dec(&dev->vblank_refcount[crtc]);
+
+ if (dev->vblank_refcount[crtc] == 0)
+ timer_id = timeout(vblank_disable_fn, (void *) dev, 5*DRM_HZ);
+
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+}
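+
+/*
+ * Editorial sketch (not in the original): get/put must be balanced. A
+ * caller that only samples the counter would do, e.g.:
+ *
+ * if (drm_vblank_get(dev, crtc) == 0) {
+ * seq = drm_vblank_count(dev, crtc);
+ * drm_vblank_put(dev, crtc);
+ * }
+ *
+ * The last put does not disable the interrupt synchronously; it arms a
+ * 5 second timeout so a prompt re-get can cancel the disable.
+ */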
+
+/*
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets. If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+/*ARGSUSED*/
+int
+drm_modeset_ctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_modeset_ctl modeset;
+ int crtc, ret = 0;
+
+ /* If drm_vblank_init() hasn't been called yet, just no-op */
+ if (!dev->num_crtcs)
+ goto out;
+
+ DRM_COPYFROM_WITH_RETURN(&modeset, (void *)data,
+ sizeof (modeset));
+
+ crtc = modeset.crtc;
+ if (crtc >= dev->num_crtcs) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * To avoid all the problems that might happen if interrupts
+ * were enabled/disabled around or between these calls, we just
+ * have the kernel take a reference on the CRTC (just once though
+ * to avoid corrupting the count if multiple, mismatched calls occur),
+ * so that interrupts remain enabled in the interim.
+ */
+ switch (modeset.cmd) {
+ case _DRM_PRE_MODESET:
+ if (!dev->vblank_inmodeset[crtc]) {
+ dev->vblank_inmodeset[crtc] = 1;
+ ret = drm_vblank_get(dev, crtc);
+ }
+ break;
+ case _DRM_POST_MODESET:
+ if (dev->vblank_inmodeset[crtc]) {
+ DRM_SPINLOCK(&dev->vbl_lock);
+ dev->vblank_disable_allowed = 1;
+ dev->vblank_inmodeset[crtc] = 0;
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+ drm_vblank_put(dev, crtc);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ return (ret);
+}
+
+/*ARGSUSED*/
+int
+drm_wait_vblank(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_wait_vblank_t vblwait;
+ int ret, flags, crtc;
+ unsigned int sequence;
+
+ if (!dev->irq_enabled) {
+ DRM_ERROR("wait vblank, EINVAL");
+ return (EINVAL);
+ }
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_wait_vblank_32_t vblwait32;
+ DRM_COPYFROM_WITH_RETURN(&vblwait32, (void *)data,
+ sizeof (vblwait32));
+ vblwait.request.type = vblwait32.request.type;
+ vblwait.request.sequence = vblwait32.request.sequence;
+ vblwait.request.signal = vblwait32.request.signal;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&vblwait, (void *)data,
+ sizeof (vblwait));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+
+ if (vblwait.request.type &
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+ DRM_ERROR("drm_wait_vblank: wrong request type 0x%x",
+ vblwait.request.type);
+ return (EINVAL);
+ }
+
+ flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ if (crtc >= dev->num_crtcs) {
+ DRM_ERROR("wait vblank operation not support");
+ return (ENOTSUP);
+ }
+ ret = drm_vblank_get(dev, crtc);
+ if (ret) {
+ DRM_ERROR("can't get drm vblank %d", ret);
+ return (ret);
+ }
+ sequence = drm_vblank_count(dev, crtc);
+
+ switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
+ case _DRM_VBLANK_RELATIVE:
+ vblwait.request.sequence += sequence;
+ vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
+ /*FALLTHROUGH*/
+ case _DRM_VBLANK_ABSOLUTE:
+ break;
+ default:
+ DRM_DEBUG("wait vblank return EINVAL");
+ return (EINVAL);
+ }
+
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ (sequence - vblwait.request.sequence) <= (1<<23)) {
+ vblwait.request.sequence = sequence + 1;
+ }
+
+ if (flags & _DRM_VBLANK_SIGNAL) {
+ /*
+ * Don't block the process; send a signal when the vblank
+ * interrupt arrives.
+ */
+ DRM_ERROR("NOT SUPPORTED YET, SHOULD BE ADDED");
+ cmn_err(CE_WARN, "NOT SUPPORTED YET, SHOULD BE ADDED");
+ ret = EINVAL;
+ goto done;
+ } else {
+ /* block until vblank interrupt */
+ /* shared code returns -errno */
+ DRM_WAIT_ON(ret, &dev->vbl_queues[crtc], 3 * DRM_HZ,
+ (((drm_vblank_count(dev, crtc)
+ - vblwait.request.sequence) <= (1 << 23)) ||
+ !dev->irq_enabled));
+ if (ret != EINTR) {
+ struct timeval now;
+ (void) uniqtime(&now);
+ vblwait.reply.tval_sec = now.tv_sec;
+ vblwait.reply.tval_usec = now.tv_usec;
+ vblwait.reply.sequence = drm_vblank_count(dev, crtc);
+ }
+ }
+
+done:
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_wait_vblank_32_t vblwait32;
+ vblwait32.reply.type = vblwait.reply.type;
+ vblwait32.reply.sequence = vblwait.reply.sequence;
+ vblwait32.reply.tval_sec = (int32_t)vblwait.reply.tval_sec;
+ vblwait32.reply.tval_usec = (int32_t)vblwait.reply.tval_usec;
+ DRM_COPYTO_WITH_RETURN((void *)data, &vblwait32,
+ sizeof (vblwait32));
+ } else {
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data, &vblwait,
+ sizeof (vblwait));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+
+ drm_vblank_put(dev, crtc);
+ return (ret);
+}
+
+
+/*ARGSUSED*/
+void
+drm_vbl_send_signals(drm_device_t *dev)
+{
+ DRM_DEBUG("drm_vbl_send_signals");
+}
+
+void
+drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+ atomic_inc(&dev->_vblank_count[crtc]);
+ DRM_WAKEUP(&dev->vbl_queues[crtc]);
+}
diff --git a/usr/src/uts/common/io/drm/drm_kstat.c b/usr/src/uts/common/io/drm/drm_kstat.c
new file mode 100644
index 0000000..23e51fe
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_kstat.c
@@ -0,0 +1,99 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc.
+ * All rights reserved. Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+#include <sys/kstat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+
+static char *drmkstat_name[] = {
+ "opens",
+ "closes",
+ "IOCTLs",
+ "locks",
+ "unlocks",
+ NULL
+};
+
+static int
+drm_kstat_update(kstat_t *ksp, int flag)
+{
+ drm_device_t *sc;
+ kstat_named_t *knp;
+ int tmp;
+
+ if (flag != KSTAT_READ)
+ return (EACCES);
+
+ sc = ksp->ks_private;
+ knp = ksp->ks_data;
+
+ for (tmp = 1; tmp < 6; tmp++) {
+ (knp++)->value.ui32 = sc->counts[tmp];
+ }
+
+ return (0);
+}
+
+int
+drm_init_kstats(drm_device_t *sc)
+{
+ int instance;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ char *np;
+ char **aknp;
+
+ instance = ddi_get_instance(sc->dip);
+ aknp = drmkstat_name;
+ ksp = kstat_create("drm", instance, "drminfo", "drm",
+ KSTAT_TYPE_NAMED, sizeof (drmkstat_name)/sizeof (char *) - 1,
+ KSTAT_FLAG_PERSISTENT);
+ if (ksp == NULL)
+ return (ENOMEM);
+
+ ksp->ks_private = sc;
+ ksp->ks_update = drm_kstat_update;
+ for (knp = ksp->ks_data; (np = (*aknp)) != NULL; knp++, aknp++) {
+ kstat_named_init(knp, np, KSTAT_DATA_UINT32);
+ }
+ kstat_install(ksp);
+
+ sc->asoft_ksp = ksp;
+
+ return (0);
+}
+
+void
+drm_fini_kstats(drm_device_t *sc)
+{
+ if (sc->asoft_ksp)
+ kstat_delete(sc->asoft_ksp);
+ else
+ cmn_err(CE_WARN, "attempt to delete null kstat");
+}
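+
+/*
+ * Editorial note (not in the original import): the counters surface
+ * through kstat(1M); on a live system something like
+ * "kstat -m drm -n drminfo" should list the five named values
+ * registered above.
+ */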
diff --git a/usr/src/uts/common/io/drm/drm_linux_list.h b/usr/src/uts/common/io/drm/drm_linux_list.h
new file mode 100644
index 0000000..02a4809
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_linux_list.h
@@ -0,0 +1,99 @@
+/*
+ * drm_linux_list.h -- linux list functions for the BSDs.
+ * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
+ */
+/*
+ * -
+ * Copyright 2003 Eric Anholt
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _DRM_LINUX_LIST_H_
+#define _DRM_LINUX_LIST_H_
+
+struct list_head {
+ struct list_head *next, *prev;
+ caddr_t contain_ptr;
+};
+
+/* Cheat, assume the list_head is at the start of the struct */
+#define list_entry(entry, type, member) (type *)(uintptr_t)(entry->contain_ptr)
+
+#define INIT_LIST_HEAD(head) { \
+ (head)->next = head; \
+ (head)->prev = head; \
+ (head)->contain_ptr = (caddr_t)head; \
+}
+
+#define list_add(entry, head, con_ptr) { \
+ (head)->next->prev = entry; \
+ (entry)->next = (head)->next; \
+ (entry)->prev = head; \
+ (head)->next = entry; \
+ (entry)->contain_ptr = con_ptr; \
+}
+
+#define list_add_tail(entry, head, con_ptr) { \
+ (entry)->prev = (head)->prev; \
+ (entry)->next = head; \
+ (head)->prev->next = entry; \
+ (head)->prev = entry; \
+ (entry)->contain_ptr = con_ptr; \
+}
+
+#define list_del(entry) { \
+ (entry)->next->prev = (entry)->prev; \
+ (entry)->prev->next = (entry)->next; \
+ (entry)->contain_ptr = NULL; \
+}
+
+#define list_for_each(entry, head) \
+ for (entry = (head)->next; entry != head; entry = (entry)->next)
+
+#define list_for_each_safe(entry, temp, head) \
+ for (entry = (head)->next, temp = (entry)->next; \
+ entry != head; \
+ entry = temp, temp = temp->next)
+
+#define list_del_init(entry) { \
+ list_del(entry); \
+ INIT_LIST_HEAD(entry); \
+}
+
+#define list_move_tail(entry, head, con_ptr) { \
+ list_del(entry); \
+ list_add_tail(entry, head, con_ptr); \
+}
+
+#define list_empty(head) ((head)->next == head)
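+
+/*
+ * Usage sketch (illustrative only): the third argument to list_add and
+ * list_add_tail records the containing object, which list_entry() later
+ * recovers through contain_ptr.  Assuming a struct with an embedded
+ * list_head named "link":
+ *
+ *	struct item { struct list_head link; int val; } *ip;
+ *	struct list_head head, *cur;
+ *
+ *	INIT_LIST_HEAD(&head);
+ *	list_add_tail(&ip->link, &head, (caddr_t)ip);
+ *	list_for_each(cur, &head) {
+ *		ip = list_entry(cur, struct item, link);
+ *		...
+ *	}
+ */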
+
+#endif /* _DRM_LINUX_LIST_H_ */
diff --git a/usr/src/uts/common/io/drm/drm_lock.c b/usr/src/uts/common/io/drm/drm_lock.c
new file mode 100644
index 0000000..6930a47
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_lock.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * lock.c -- IOCTLs for locking -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include "drmP.h"
+
+int
+drm_lock_take(drm_lock_data_t *lock_data, unsigned int context)
+{
+ unsigned int old, new;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD)
+ new = old | _DRM_LOCK_CONT;
+ else
+ new = context | _DRM_LOCK_HELD;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return (0);
+ }
+ }
+ if (new == (context | _DRM_LOCK_HELD)) {
+ /* Have lock */
+ return (1);
+ }
+ return (0);
+}
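+
+/*
+ * Sketch of the lock-word convention used above (illustrative note):
+ * hw_lock->lock packs the holder's context number together with
+ * _DRM_LOCK_HELD, and _DRM_LOCK_CONT marks contention, so
+ * drm_lock_take() returns 1 only when its CAS loop installed
+ * (context | _DRM_LOCK_HELD) and 0 when the caller must wait.
+ */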
+
+/*
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ */
+int
+drm_lock_transfer(drm_device_t *dev, drm_lock_data_t *lock_data,
+ unsigned int context)
+{
+ unsigned int old, new;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ dev->lock.filp = NULL;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ return (1);
+}
+
+int
+drm_lock_free(drm_device_t *dev, volatile unsigned int *lock,
+ unsigned int context)
+{
+ unsigned int old, new;
+
+ mutex_enter(&(dev->lock.lock_mutex));
+ dev->lock.filp = NULL;
+ do {
+ old = *lock;
+ new = 0;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ if (_DRM_LOCK_IS_HELD(old) &&
+ (_DRM_LOCKING_CONTEXT(old) != context)) {
+ DRM_ERROR("%d freed heavyweight lock held by %d\n",
+ context, _DRM_LOCKING_CONTEXT(old));
+ mutex_exit(&(dev->lock.lock_mutex));
+ return (1);
+ }
+ cv_broadcast(&(dev->lock.lock_cv));
+ mutex_exit(&(dev->lock.lock_mutex));
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_lock(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_lock_t lock;
+ int ret = 0;
+
+ DRM_COPYFROM_WITH_RETURN(&lock, (void *)data, sizeof (lock));
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ DRM_CURRENTPID, lock.context);
+ return (EINVAL);
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
+ lock.flags);
+ if (dev->driver->use_dma_queue && lock.context < 0)
+ return (EINVAL);
+
+ mutex_enter(&(dev->lock.lock_mutex));
+ for (;;) {
+ if (drm_lock_take(&dev->lock, lock.context)) {
+ dev->lock.filp = fpriv;
+ dev->lock.lock_time = ddi_get_lbolt();
+ break; /* Got lock */
+ }
+ ret = cv_wait_sig(&(dev->lock.lock_cv),
+ &(dev->lock.lock_mutex));
+
+ if (ret == 0) {
+ mutex_exit(&(dev->lock.lock_mutex));
+ return (EINTR);
+ }
+ }
+ mutex_exit(&(dev->lock.lock_mutex));
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+ if (dev->driver->dma_quiescent != NULL &&
+ (lock.flags & _DRM_LOCK_QUIESCENT))
+ dev->driver->dma_quiescent(dev);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+drm_unlock(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_lock_t lock;
+
+ DRM_COPYFROM_WITH_RETURN(&lock, (void *)data, sizeof (lock));
+
+ DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
+ lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
+ lock.flags);
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ DRM_CURRENTPID, lock.context);
+ return (EINVAL);
+ }
+ atomic_inc_32(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ DRM_LOCK();
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock, lock.context)) {
+ DRM_ERROR("drm_unlock\n");
+ }
+ DRM_UNLOCK();
+ return (0);
+}
diff --git a/usr/src/uts/common/io/drm/drm_memory.c b/usr/src/uts/common/io/drm/drm_memory.c
new file mode 100644
index 0000000..cf2d5f6
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_memory.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * drm_memory.c -- Memory management wrappers for DRM -*- linux-c -*-
+ * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include "drmP.h"
+
+/* Device memory access structure */
+typedef struct drm_device_iomap {
+ uint_t physical; /* physical address */
+ uint_t size; /* size of mapping */
+ uint_t drm_regnum; /* register number */
+ caddr_t drm_base; /* kernel virtual address */
+ ddi_acc_handle_t drm_handle; /* data access handle */
+} drm_device_iomap_t;
+
+void
+drm_mem_init(void)
+{
+}
+
+void
+drm_mem_uninit(void)
+{
+}
+
+/*ARGSUSED*/
+void *
+drm_alloc(size_t size, int area)
+{
+	return (kmem_zalloc(size, KM_NOSLEEP));
+}
+
+/*ARGSUSED*/
+void *
+drm_calloc(size_t nmemb, size_t size, int area)
+{
+ return (kmem_zalloc(size * nmemb, KM_NOSLEEP));
+}
+
+/*ARGSUSED*/
+void *
+drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
+{
+ void *pt;
+
+	pt = kmem_zalloc(size, KM_NOSLEEP);
+	if (pt == NULL) {
+		DRM_ERROR("drm_realloc: allocation failed");
+		return (NULL);
+	}
+	if (oldpt && oldsize) {
+		bcopy(oldpt, pt, (oldsize <= size) ? oldsize : size);
+		kmem_free(oldpt, oldsize);
+	}
+ }
+ return (pt);
+}
+
+/*ARGSUSED*/
+void
+drm_free(void *pt, size_t size, int area)
+{
+ kmem_free(pt, size);
+}
+
+/*ARGSUSED*/
+int
+drm_get_pci_index_reg(dev_info_t *devi, uint_t physical, uint_t size,
+ off_t *off)
+{
+ int length;
+ pci_regspec_t *regs;
+ int n_reg, i;
+ int regnum;
+ uint_t base, regsize;
+
+ regnum = -1;
+
+ if (ddi_dev_nregs(devi, &n_reg) == DDI_FAILURE) {
+ DRM_ERROR("drm_get_pci_index_reg:ddi_dev_nregs failed\n");
+ n_reg = 0;
+ return (-1);
+ }
+
+ if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
+ "assigned-addresses", (caddr_t)&regs, &length) !=
+ DDI_PROP_SUCCESS) {
+ DRM_ERROR("drm_get_pci_index_reg: ddi_getlongprop failed!\n");
+		return (-1);
+ }
+
+ for (i = 0; i < n_reg; i ++) {
+ base = (uint_t)regs[i].pci_phys_low;
+ regsize = (uint_t)regs[i].pci_size_low;
+ if ((uint_t)physical >= base &&
+ (uint_t)physical < (base + regsize)) {
+ regnum = i + 1;
+ *off = (off_t)(physical - base);
+ break;
+ }
+ }
+
+ kmem_free(regs, (size_t)length);
+ return (regnum);
+}
+
+/* data access attributes structure for register access */
+static ddi_device_acc_attr_t dev_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC,
+};
+
+int
+do_ioremap(dev_info_t *devi, drm_device_iomap_t *iomap)
+{
+ int regnum;
+ off_t offset;
+ int ret;
+
+ regnum = drm_get_pci_index_reg(devi, iomap->physical,
+ iomap->size, &offset);
+ if (regnum < 0) {
+ DRM_ERROR("do_ioremap: can not find regster entry,"
+ " start=0x%x, size=0x%x", iomap->physical, iomap->size);
+ return (ENXIO);
+ }
+
+ iomap->drm_regnum = regnum;
+
+ ret = ddi_regs_map_setup(devi, iomap->drm_regnum,
+ (caddr_t *)&(iomap->drm_base), (offset_t)offset,
+ (offset_t)iomap->size, &dev_attr, &iomap->drm_handle);
+ if (ret < 0) {
+ DRM_ERROR("do_ioremap: failed to map regs: regno=%d,"
+ " offset=0x%x", regnum, offset);
+ iomap->drm_handle = NULL;
+ return (EFAULT);
+ }
+
+ return (0);
+}
+
+int
+drm_ioremap(drm_device_t *softstate, drm_local_map_t *map)
+{
+ drm_device_iomap_t iomap;
+ int ret;
+
+ DRM_DEBUG("drm_ioremap called\n");
+
+ bzero(&iomap, sizeof (drm_device_iomap_t));
+ iomap.physical = map->offset;
+ iomap.size = map->size;
+ ret = do_ioremap(softstate->dip, &iomap);
+
+ if (ret) {
+ DRM_ERROR("drm_ioremap: failed, physaddr=0x%x, size=0x%x",
+ map->offset, map->size);
+ return (ret);
+ }
+
+ /* ddi_acc_handle_t */
+ map->dev_handle = iomap.drm_handle;
+ map->handle = (void *)iomap.drm_base;
+ map->dev_addr = iomap.drm_base;
+
+ DRM_DEBUG(
+ "map->handle is %p map->dev_addr is %lx map->size %x",
+ (void *)map->handle, (unsigned long)map->dev_addr, map->size);
+
+ return (0);
+}
+
+void
+drm_ioremapfree(drm_local_map_t *map)
+{
+ if (map->dev_handle == NULL) {
+ DRM_ERROR("drm_ioremapfree: handle is NULL");
+ return;
+ }
+ ddi_regs_map_free(&map->dev_handle);
+}
diff --git a/usr/src/uts/common/io/drm/drm_mm.c b/usr/src/uts/common/io/drm/drm_mm.c
new file mode 100644
index 0000000..d2d70c4
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_mm.c
@@ -0,0 +1,336 @@
+/*
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files(the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice(including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple: the free list is currently
+ * just an unordered stack of free regions, so there might be substantial
+ * performance gains if a smarter structure, such as an RB-tree, were used
+ * instead, at least under heavy fragmentation.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+
+unsigned long
+drm_mm_tail_space(struct drm_mm *mm)
+{
+ struct list_head *tail_node;
+ struct drm_mm_node *entry;
+
+ tail_node = mm->ml_entry.prev;
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ if (!entry->free)
+ return (0);
+
+ return (entry->size);
+}
+
+int
+drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+{
+ struct list_head *tail_node;
+ struct drm_mm_node *entry;
+
+ tail_node = mm->ml_entry.prev;
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ if (!entry->free)
+ return (ENOMEM);
+
+ if (entry->size <= size)
+ return (ENOMEM);
+
+ entry->size -= size;
+ return (0);
+}
+
+
+static int
+drm_mm_create_tail_node(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size)
+{
+ struct drm_mm_node *child;
+
+ child = (struct drm_mm_node *)
+ drm_alloc(sizeof (*child), DRM_MEM_MM);
+ if (!child)
+ return (ENOMEM);
+
+ child->free = 1;
+ child->size = size;
+ child->start = start;
+ child->mm = mm;
+
+ list_add_tail(&child->ml_entry, &mm->ml_entry, (caddr_t)child);
+ list_add_tail(&child->fl_entry, &mm->fl_entry, (caddr_t)child);
+
+ return (0);
+}
+
+
+int
+drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+{
+ struct list_head *tail_node;
+ struct drm_mm_node *entry;
+
+ tail_node = mm->ml_entry.prev;
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ if (!entry->free) {
+ return (drm_mm_create_tail_node(mm,
+ entry->start + entry->size, size));
+ }
+ entry->size += size;
+ return (0);
+}
+
+static struct drm_mm_node *
+drm_mm_split_at_start(struct drm_mm_node *parent,
+ unsigned long size)
+{
+ struct drm_mm_node *child;
+
+ child = (struct drm_mm_node *)
+ drm_alloc(sizeof (*child), DRM_MEM_MM);
+ if (!child)
+ return (NULL);
+
+ INIT_LIST_HEAD(&child->fl_entry);
+
+ child->free = 0;
+ child->size = size;
+ child->start = parent->start;
+ child->mm = parent->mm;
+
+ list_add_tail(&child->ml_entry, &parent->ml_entry, (caddr_t)child);
+ INIT_LIST_HEAD(&child->fl_entry);
+
+ parent->size -= size;
+ parent->start += size;
+ return (child);
+}
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void
+drm_mm_put_block(struct drm_mm_node *cur)
+{
+
+ struct drm_mm *mm = cur->mm;
+ struct list_head *cur_head = &cur->ml_entry;
+ struct list_head *root_head = &mm->ml_entry;
+ struct drm_mm_node *prev_node = NULL;
+ struct drm_mm_node *next_node;
+
+ int merged = 0;
+
+ if (cur_head->prev != root_head) {
+ prev_node = list_entry(cur_head->prev,
+ struct drm_mm_node, ml_entry);
+ if (prev_node->free) {
+ prev_node->size += cur->size;
+ merged = 1;
+ }
+ }
+ if (cur_head->next != root_head) {
+ next_node = list_entry(cur_head->next,
+ struct drm_mm_node, ml_entry);
+ if (next_node->free) {
+ if (merged) {
+ prev_node->size += next_node->size;
+ list_del(&next_node->ml_entry);
+ list_del(&next_node->fl_entry);
+ drm_free(next_node,
+ sizeof (*next_node), DRM_MEM_MM);
+ } else {
+ next_node->size += cur->size;
+ next_node->start = cur->start;
+ merged = 1;
+ }
+ }
+ }
+ if (!merged) {
+ cur->free = 1;
+ list_add(&cur->fl_entry, &mm->fl_entry, (caddr_t)cur);
+ } else {
+ list_del(&cur->ml_entry);
+ drm_free(cur, sizeof (*cur), DRM_MEM_MM);
+ }
+}
+
+struct drm_mm_node *
+drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+
+ struct drm_mm_node *align_splitoff = NULL;
+ struct drm_mm_node *child;
+ unsigned tmp = 0;
+
+ if (alignment)
+ tmp = parent->start % alignment;
+
+ if (tmp) {
+ align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+ if (!align_splitoff)
+ return (NULL);
+ }
+
+ if (parent->size == size) {
+ list_del_init(&parent->fl_entry);
+ parent->free = 0;
+ return (parent);
+ } else {
+ child = drm_mm_split_at_start(parent, size);
+ }
+
+ if (align_splitoff)
+ drm_mm_put_block(align_splitoff);
+
+ return (child);
+}
+
+struct drm_mm_node *
+drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ int best_match)
+{
+ struct list_head *list;
+ const struct list_head *free_stack = &mm->fl_entry;
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+ unsigned wasted;
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each(list, free_stack) {
+ entry = list_entry(list, struct drm_mm_node, fl_entry);
+ wasted = 0;
+
+ if (entry->size < size)
+ continue;
+
+ if (alignment) {
+ register unsigned tmp = entry->start % alignment;
+ if (tmp)
+ wasted += alignment - tmp;
+ }
+
+		if (entry->size >= size + wasted) {
+			if (!best_match)
+				return (entry);
+			if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+ }
+
+ return (best);
+}
+
+int
+drm_mm_clean(struct drm_mm *mm)
+{
+ struct list_head *head = &mm->ml_entry;
+
+ return (head->next->next == head);
+}
+
+int
+drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
+{
+ INIT_LIST_HEAD(&mm->ml_entry);
+ INIT_LIST_HEAD(&mm->fl_entry);
+
+ return (drm_mm_create_tail_node(mm, start, size));
+}
+
+
+void
+drm_mm_takedown(struct drm_mm *mm)
+{
+ struct list_head *bnode = mm->fl_entry.next;
+ struct drm_mm_node *entry;
+
+ entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+
+ if (entry->ml_entry.next != &mm->ml_entry ||
+ entry->fl_entry.next != &mm->fl_entry) {
+ DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+ return;
+ }
+
+ list_del(&entry->fl_entry);
+ list_del(&entry->ml_entry);
+
+ drm_free(entry, sizeof (*entry), DRM_MEM_MM);
+}
+
+void
+drm_mm_clean_ml(const struct drm_mm *mm)
+{
+ const struct list_head *mlstack = &mm->ml_entry;
+ struct list_head *list, *temp;
+ struct drm_mm_node *entry;
+
+ if (mlstack->next == NULL)
+ return;
+
+ list_for_each_safe(list, temp, mlstack) {
+ entry = list_entry(list, struct drm_mm_node, ml_entry);
+ DRM_DEBUG("ml_entry 0x%x, size 0x%x, start 0x%x",
+ entry, entry->size, entry->start);
+
+ list_del(&entry->fl_entry);
+ list_del(&entry->ml_entry);
+ drm_free(entry, sizeof (*entry), DRM_MEM_MM);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_msg.c b/usr/src/uts/common/io/drm/drm_msg.c
new file mode 100644
index 0000000..120776c
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_msg.c
@@ -0,0 +1,59 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+
+void
+drm_debug(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vcmn_err(CE_NOTE, fmt, ap);
+ va_end(ap);
+}
+
+void
+drm_error(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vcmn_err(CE_WARN, fmt, ap);
+ va_end(ap);
+}
+
+void
+drm_info(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vcmn_err(CE_NOTE, fmt, ap);
+ va_end(ap);
+}
diff --git a/usr/src/uts/common/io/drm/drm_pci.c b/usr/src/uts/common/io/drm/drm_pci.c
new file mode 100644
index 0000000..37a02a1
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_pci.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/* BEGIN CSTYLED */
+/**
+ * \file drm_pci.c
+ * \brief PCI consistent, DMA-accessible memory functions.
+ *
+ * \author Eric Anholt <anholt@FreeBSD.org>
+ */
+
+/*-
+ * Copyright 2003 Eric Anholt.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**********************************************************************/
+/** \name PCI memory */
+/*@{*/
+/* END CSTYLED */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+#include <vm/seg_kmem.h>
+
+#define PCI_DEVICE(x) (((x)>>11) & 0x1f)
+#define PCI_FUNCTION(x) (((x) & 0x700) >> 8)
+#define PCI_BUS(x) (((x) & 0xff0000) >> 16)
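+
+/*
+ * These macros decode the first cell (pci_phys_hi) of a PCI "reg"
+ * property.  Illustratively, a cell value of 0x00010800 decodes to
+ * bus 1, device 1, function 0.
+ */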
+
+typedef struct drm_pci_resource {
+ uint_t regnum;
+ unsigned long offset;
+ unsigned long size;
+} drm_pci_resource_t;
+
+int
+pci_get_info(drm_device_t *softstate, int *bus, int *slot, int *func)
+{
+ int *regs_list;
+ uint_t nregs = 0;
+
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, softstate->dip,
+ DDI_PROP_DONTPASS, "reg", (int **)&regs_list, &nregs)
+ != DDI_PROP_SUCCESS) {
+ DRM_ERROR("pci_get_info: get pci function bus device failed");
+ goto error;
+ }
+ *bus = (int)PCI_BUS(regs_list[0]);
+ *slot = (int)PCI_DEVICE(regs_list[0]);
+ *func = (int)PCI_FUNCTION(regs_list[0]);
+
+ if (nregs > 0) {
+ ddi_prop_free(regs_list);
+ }
+ return (DDI_SUCCESS);
+error:
+ if (nregs > 0) {
+ ddi_prop_free(regs_list);
+ }
+ return (DDI_FAILURE);
+}
+
+int
+pci_get_irq(drm_device_t *statep)
+{
+ int irq;
+
+ extern int drm_supp_get_irq(void *);
+
+ irq = ddi_prop_get_int(DDI_DEV_T_ANY,
+ statep->dip, DDI_PROP_DONTPASS, "interrupts", -1);
+
+ if (irq > 0) {
+ irq = drm_supp_get_irq(statep->drm_handle);
+ }
+
+ return (irq);
+}
+
+int
+pci_get_vendor(drm_device_t *statep)
+{
+ int vendorid;
+
+ vendorid = ddi_prop_get_int(DDI_DEV_T_ANY,
+ statep->dip, DDI_PROP_DONTPASS, "vendor-id", 0);
+
+ return (vendorid);
+}
+
+int
+pci_get_device(drm_device_t *statep)
+{
+ int deviceid;
+
+ deviceid = ddi_prop_get_int(DDI_DEV_T_ANY,
+ statep->dip, DDI_PROP_DONTPASS, "device-id", 0);
+
+ return (deviceid);
+}
+
+void
+drm_core_ioremap(struct drm_local_map *map, drm_device_t *dev)
+{
+ if ((map->type == _DRM_AGP) && dev->agp) {
+		/*
+		 * During AGP mapping initialization, the AGP aperture is
+		 * mapped into kernel space, so kernel accesses to memory
+		 * managed by the AGP GART go through a two-level
+		 * translation: kernel virtual address -> aperture address
+		 * -> physical address. The opensourced code improves on
+		 * this by invoking agp_remap() to discard the mapping
+		 * between the aperture and kernel space and to map the
+		 * physical memory allocated to the AGP GART directly into
+		 * the kernel instead, so that such accesses no longer go
+		 * through the AGP hardware and become a single
+		 * translation: kernel virtual -> physical address, which
+		 * is clearly more efficient. On Solaris, however, the
+		 * AGPIOC_ALLOCATE ioctl of the agpgart driver does not
+		 * return a physical address, so we are unable to create
+		 * that direct mapping and the call to agp_remap() is
+		 * removed.
+		 */
+ DRM_DEBUG("drm_core_ioremap: skipping agp_remap\n");
+ } else {
+ (void) drm_ioremap(dev, map);
+
+ }
+}
+
+/*ARGSUSED*/
+void
+drm_core_ioremapfree(struct drm_local_map *map, drm_device_t *dev)
+{
+ if (map->type != _DRM_AGP) {
+ if (map->handle && map->size)
+ drm_ioremapfree(map);
+ } else {
+ /*
+ * Refer to the comments in drm_core_ioremap() where we removed
+ * the calling to agp_remap(), correspondingly, we remove the
+ * calling to agp_remap_free(dev, map);
+ */
+ DRM_DEBUG("drm_core_ioremap: skipping agp_remap_free\n");
+ }
+}
+
+struct drm_local_map *
+drm_core_findmap(drm_device_t *dev, unsigned long handle)
+{
+ drm_local_map_t *map;
+
+ DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+
+/*
+ * For the time being, we compare the low 32 bit only,
+ * We will hash handle to 32-bit to solve this issue later.
+ */
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if ((((unsigned long)map->handle) & 0x00000000ffffffff)
+ == (handle & 0x00000000ffffffff))
+ return (map);
+ }
+
+ return (NULL);
+}
+
+/*
+ * pci_alloc_consistent()
+ */
+static ddi_dma_attr_t hw_dma_attr = {
+ DMA_ATTR_V0, /* version */
+ 0, /* addr_lo */
+ 0xffffffff, /* addr_hi */
+ 0xffffffff, /* count_max */
+ 4096, /* alignment */
+ 0xfff, /* burstsize */
+ 1, /* minxfer */
+ 0xffffffff, /* maxxfer */
+ 0xffffffff, /* seg */
+ 1, /* sgllen */
+ 4, /* granular */
+ 0 /* flags */
+};
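+
+/*
+ * Note that drm_pci_alloc() below rewrites dma_attr_align,
+ * dma_attr_addr_hi and dma_attr_sgllen in this shared template on
+ * every call, so it only describes the most recent allocation.
+ */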
+
+static ddi_device_acc_attr_t hw_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+
+void *
+drm_pci_alloc(drm_device_t *dev, size_t size,
+ size_t align, dma_addr_t maxaddr, int segments)
+{
+ drm_dma_handle_t *dmah;
+ uint_t count;
+ int ret = DDI_FAILURE;
+
+	/* allocate contiguous physical memory for the hw status page */
+ if (align == 0)
+ hw_dma_attr.dma_attr_align = 1;
+ else
+ hw_dma_attr.dma_attr_align = align;
+
+ hw_dma_attr.dma_attr_addr_hi = maxaddr;
+ hw_dma_attr.dma_attr_sgllen = segments;
+
+ dmah = kmem_zalloc(sizeof (drm_dma_handle_t), KM_SLEEP);
+ if (ret = ddi_dma_alloc_handle(dev->dip, &hw_dma_attr,
+ DDI_DMA_SLEEP, NULL, &dmah->dma_hdl)) {
+ DRM_ERROR("drm_pci_alloc:ddi_dma_alloc_handle failed\n");
+ goto err3;
+ }
+
+ if (ret = ddi_dma_mem_alloc(dmah->dma_hdl, size, &hw_acc_attr,
+ DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
+ DDI_DMA_SLEEP, NULL, (caddr_t *)&dmah->vaddr,
+ &dmah->real_sz, &dmah->acc_hdl)) {
+ DRM_ERROR("drm_pci_alloc: ddi_dma_mem_alloc failed\n");
+ goto err2;
+ }
+
+ ret = ddi_dma_addr_bind_handle(dmah->dma_hdl, NULL,
+ (caddr_t)dmah->vaddr, dmah->real_sz,
+ DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
+ DDI_DMA_SLEEP, NULL, &dmah->cookie, &count);
+ if (ret != DDI_DMA_MAPPED) {
+ DRM_ERROR("drm_pci_alloc: alloc phys memory failed");
+ goto err1;
+ }
+
+ if (count > segments) {
+ (void) ddi_dma_unbind_handle(dmah->dma_hdl);
+ goto err1;
+ }
+
+ dmah->cookie_num = count;
+ if (count == 1)
+ dmah->paddr = dmah->cookie.dmac_address;
+
+ return (dmah);
+
+err1:
+ ddi_dma_mem_free(&dmah->acc_hdl);
+err2:
+ ddi_dma_free_handle(&dmah->dma_hdl);
+err3:
+ kmem_free(dmah, sizeof (*dmah));
+ return (NULL);
+}
+
+/*
+ * pci_free_consistent()
+ */
+/*ARGSUSED*/
+void
+drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
+{
+ ASSERT(dmah != NULL);
+ (void) ddi_dma_unbind_handle(dmah->dma_hdl);
+ ddi_dma_mem_free(&dmah->acc_hdl);
+ ddi_dma_free_handle(&dmah->dma_hdl);
+ kmem_free(dmah, sizeof (drm_dma_handle_t));
+}
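+
+/*
+ * Illustrative pairing: drm_sg_alloc() in drm_scatter.c allocates with
+ *
+ *	dmah = drm_pci_alloc(dev, ptob(pages), 4096, 0xfffffffful, pages);
+ *
+ * and drm_sg_cleanup() later releases the handle with
+ * drm_pci_free(dev, dmah).
+ */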
+
+int
+do_get_pci_res(drm_device_t *dev, drm_pci_resource_t *resp)
+{
+ int length;
+ pci_regspec_t *regs;
+
+ if (ddi_getlongprop(
+ DDI_DEV_T_ANY, dev->dip, DDI_PROP_DONTPASS,
+ "assigned-addresses", (caddr_t)&regs, &length) !=
+ DDI_PROP_SUCCESS) {
+ DRM_ERROR("do_get_pci_res: ddi_getlongprop failed!\n");
+ return (EFAULT);
+ }
+ resp->offset =
+ (unsigned long)regs[resp->regnum].pci_phys_low;
+ resp->size =
+ (unsigned long)regs[resp->regnum].pci_size_low;
+ kmem_free(regs, (size_t)length);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+unsigned long
+drm_get_resource_start(drm_device_t *softstate, unsigned int regnum)
+{
+ drm_pci_resource_t res;
+ int ret;
+
+ res.regnum = regnum;
+
+ ret = do_get_pci_res(softstate, &res);
+
+ if (ret != 0) {
+ DRM_ERROR("drm_get_resource_start: ioctl failed");
+ return (0);
+ }
+
+ return (res.offset);
+
+}
+
+/*ARGSUSED*/
+unsigned long
+drm_get_resource_len(drm_device_t *softstate, unsigned int regnum)
+{
+ drm_pci_resource_t res;
+ int ret;
+
+ res.regnum = regnum;
+
+ ret = do_get_pci_res(softstate, &res);
+
+ if (ret != 0) {
+ DRM_ERROR("drm_get_resource_len: ioctl failed");
+ return (0);
+ }
+
+ return (res.size);
+}
diff --git a/usr/src/uts/common/io/drm/drm_sarea.h b/usr/src/uts/common/io/drm/drm_sarea.h
new file mode 100644
index 0000000..302edcd
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sarea.h
@@ -0,0 +1,81 @@
+/*
+ * \file drm_sarea.h
+ * \brief SAREA definitions
+ *
+ * \author Michel Dänzer <michel@daenzer.net>
+ */
+
+/*
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/* BEGIN CSTYLED */
+#ifndef _DRM_SAREA_H
+#define _DRM_SAREA_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drm.h"
+
+/* SAREA area needs to be at least a page */
+#if defined(__alpha__)
+#define SAREA_MAX 0x2000
+#elif defined(__ia64__)
+#define SAREA_MAX 0x10000 /* 64kB */
+#else
+/* Intel 830M driver needs at least 8k SAREA */
+#define SAREA_MAX 0x2000UL
+#endif
+
+/** Maximum number of drawables in the SAREA */
+#define SAREA_MAX_DRAWABLES 256
+
+#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
+
+/** SAREA drawable */
+typedef struct drm_sarea_drawable {
+ unsigned int stamp;
+ unsigned int flags;
+} drm_sarea_drawable_t;
+
+/** SAREA frame */
+typedef struct drm_sarea_frame {
+ unsigned int x;
+ unsigned int y;
+ unsigned int width;
+ unsigned int height;
+ unsigned int fullscreen;
+} drm_sarea_frame_t;
+
+/** SAREA */
+typedef struct drm_sarea {
+ /** first thing is always the DRM locking structure */
+ drm_hw_lock_t lock;
+ /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
+ drm_hw_lock_t drawable_lock;
+ drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ drm_sarea_frame_t frame; /**< frame */
+ drm_context_t dummy_context;
+} drm_sarea_t;
+
+/* END CSTYLED */
+#endif /* _DRM_SAREA_H */
diff --git a/usr/src/uts/common/io/drm/drm_scatter.c b/usr/src/uts/common/io/drm/drm_scatter.c
new file mode 100644
index 0000000..b1d1076
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_scatter.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/* BEGIN CSTYLED */
+
+/* drm_scatter.c -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com */
+/*-
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+/* END CSTYLED */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+#include <gfx_private.h>
+#include "drm_io32.h"
+
+#define DEBUG_SCATTER 0
+
+#ifdef _LP64
+#define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+#define ScatterHandle(x) (unsigned int)(x)
+#endif
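+
+/*
+ * ScatterHandle() folds a 64-bit kernel virtual address into a 32-bit
+ * handle (the high half is added to the low half) so that the handle
+ * still fits the 32-bit handle field exchanged with 32-bit userland in
+ * drm_scatter_gather_32_t.
+ */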
+
+void
+drm_sg_cleanup(drm_device_t *dev, drm_sg_mem_t *entry)
+{
+ int pages = entry->pages;
+
+ if (entry->busaddr) {
+ kmem_free(entry->busaddr, sizeof (*entry->busaddr) * pages);
+ entry->busaddr = NULL;
+ }
+
+ ASSERT(entry->umem_cookie == NULL);
+
+ if (entry->dmah_sg) {
+ drm_pci_free(dev, entry->dmah_sg);
+ entry->dmah_sg = NULL;
+ }
+
+ if (entry->dmah_gart) {
+ drm_pci_free(dev, entry->dmah_gart);
+ entry->dmah_gart = NULL;
+ }
+
+	drm_free(entry, sizeof (drm_sg_mem_t), DRM_MEM_SGLISTS);
+}
+
+/*ARGSUSED*/
+int
+drm_sg_alloc(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ unsigned long pages;
+ drm_sg_mem_t *entry;
+ drm_dma_handle_t *dmah;
+ drm_scatter_gather_t request;
+
+ DRM_DEBUG("%s\n", "drm_sg_alloc");
+
+ if (dev->sg)
+ return (EINVAL);
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_scatter_gather_32_t request32;
+
+ DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
+ sizeof (request32));
+ request.size = request32.size;
+ request.handle = request32.handle;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
+ sizeof (request));
+
+ pages = btopr(request.size);
+ DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages);
+ entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
+ entry->pages = (int)pages;
+ dmah = drm_pci_alloc(dev, ptob(pages), 4096, 0xfffffffful, pages);
+ if (dmah == NULL)
+ goto err_exit;
+ entry->busaddr = (void *)kmem_zalloc(sizeof (*entry->busaddr) *
+ pages, KM_SLEEP);
+
+ entry->handle = ScatterHandle((unsigned long)dmah->vaddr);
+ entry->virtual = (void *)dmah->vaddr;
+ request.handle = entry->handle;
+ entry->dmah_sg = dmah;
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_scatter_gather_32_t data32;
+
+ data32.size = (uint32_t)request.size;
+ data32.handle = (uint32_t)request.handle;
+
+ DRM_COPYTO_WITH_RETURN((void *)data, &data32,
+ sizeof (data32));
+ } else
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data, &request,
+ sizeof (request));
+
+ DRM_LOCK();
+ if (dev->sg) {
+ DRM_UNLOCK();
+ drm_sg_cleanup(dev, entry);
+ return (EINVAL);
+ }
+ dev->sg = entry;
+ DRM_UNLOCK();
+
+ return (0);
+
+err_exit:
+ drm_sg_cleanup(dev, entry);
+ return (ENOMEM);
+}
+
+/*ARGSUSED*/
+int
+drm_sg_free(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_scatter_gather_t request;
+ drm_sg_mem_t *entry;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_scatter_gather_32_t request32;
+
+ DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
+ sizeof (request32));
+ request.size = request32.size;
+ request.handle = request32.handle;
+ } else
+#endif
+ DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
+ sizeof (request));
+
+ DRM_LOCK();
+ entry = dev->sg;
+ dev->sg = NULL;
+ DRM_UNLOCK();
+
+ if (!entry || entry->handle != request.handle)
+ return (EINVAL);
+
+ drm_sg_cleanup(dev, entry);
+
+ return (0);
+}
diff --git a/usr/src/uts/common/io/drm/drm_stub.c b/usr/src/uts/common/io/drm/drm_stub.c
new file mode 100644
index 0000000..ec82b0a
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_stub.c
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_stub.c
+ * Stub support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_core.h"
+#include "drm_linux_list.h"
+
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+
+struct idr drm_minors_idr;
+
+static int drm_minor_get_id(struct drm_device *dev, int type)
+{
+ int new_id;
+ int ret;
+ int base, limit;
+
+ switch (type) {
+ case DRM_MINOR_LEGACY:
+ base = DRM_MINOR_ID_BASE_LEGACY;
+ limit = DRM_MINOR_ID_LIMIT_LEGACY;
+ break;
+ case DRM_MINOR_CONTROL:
+ base = DRM_MINOR_ID_BASE_CONTROL;
+ limit = DRM_MINOR_ID_LIMIT_CONTROL;
+ break;
+ case DRM_MINOR_RENDER:
+ base = DRM_MINOR_ID_BASE_RENDER;
+ limit = DRM_MINOR_ID_LIMIT_RENDER;
+ break;
+ case DRM_MINOR_VGATEXT:
+ base = DRM_MINOR_ID_BASE_VGATEXT;
+ limit = DRM_MINOR_ID_LIMIT_VGATEXT;
+ break;
+ case DRM_MINOR_AGPMASTER:
+ base = DRM_MINOR_ID_BASE_AGPMASTER;
+ limit = DRM_MINOR_ID_LIMIT_AGPMASTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = idr_get_new_above(&drm_minors_idr, NULL,
+ base, &new_id);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret) {
+ return ret;
+ }
+
+ if (new_id > limit) {
+ (void) idr_remove(&drm_minors_idr, new_id);
+ return -EINVAL;
+ }
+ return new_id;
+}
+
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+ struct drm_master *master;
+ int i;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return NULL;
+
+ kref_init(&master->refcount);
+ mutex_init(&master->lock.lock_mutex, NULL, MUTEX_DRIVER, (void *)minor->dev->pdev->intr_block);
+ cv_init(&master->lock.lock_cv, NULL, CV_DRIVER, NULL);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ master->magiclist[i].head = NULL;
+ master->magiclist[i].tail = NULL;
+ }
+
+ master->minor = minor;
+
+ list_add_tail(&master->head, &minor->master_list, (caddr_t)master);
+
+ return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+ kref_get(&master->refcount);
+ return master;
+}
+
+void drm_master_destroy(struct kref *kref)
+{
+ struct drm_master *master = container_of(kref, struct drm_master, refcount);
+ struct drm_magic_entry *pt, *next;
+ struct drm_device *dev = master->minor->dev;
+ struct drm_map_list *r_list, *list_temp;
+ int i;
+
+ list_del(&master->head);
+
+ if (dev->driver->master_destroy)
+ dev->driver->master_destroy(dev, master);
+
+ list_for_each_entry_safe(r_list, list_temp, struct drm_map_list, &dev->maplist, head) {
+ if (r_list->master == master) {
+ (void) drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
+ }
+ }
+
+ if (master->unique) {
+ kfree(master->unique, master->unique_size);
+ master->unique = NULL;
+ master->unique_len = 0;
+
+ }
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = master->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ master->magiclist[i].head = master->magiclist[i].tail = NULL;
+ }
+
+ cv_destroy(&master->lock.lock_cv);
+ mutex_destroy(&master->lock.lock_mutex);
+
+ kfree(master, sizeof (struct drm_master));
+}
+
+void drm_master_put(struct drm_master **master)
+{
+ kref_put(&(*master)->refcount, drm_master_destroy);
+ *master = NULL;
+}
+
+/* LINTED */
+int drm_setmaster_ioctl(DRM_IOCTL_ARGS)
+{
+ int ret = 0;
+
+ if (dev->driver->entervt)
+ dev->driver->entervt(dev);
+
+ if (file->is_master)
+ return 0;
+
+ if (file->minor->master && file->minor->master != file->master)
+ return -EINVAL;
+
+ if (!file->master)
+ return -EINVAL;
+
+ if (!file->minor->master &&
+ file->minor->master != file->master) {
+ mutex_lock(&dev->struct_mutex);
+ file->minor->master = drm_master_get(file->master);
+ file->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file, false);
+ if (unlikely(ret != 0)) {
+ file->is_master = 0;
+ drm_master_put(&file->minor->master);
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ return 0;
+}
+
+/* LINTED */
+int drm_dropmaster_ioctl(DRM_IOCTL_ARGS)
+{
+ if (!file->is_master)
+ return -EINVAL;
+
+ if (!file->minor->master)
+ return -EINVAL;
+
+ if (dev->driver->leavevt)
+ dev->driver->leavevt(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file, false);
+ drm_master_put(&file->minor->master);
+ file->is_master = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+ struct drm_driver *driver)
+{
+ int retcode;
+
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
+ INIT_LIST_HEAD(&dev->gem_objects_list);
+
+ mutex_init(&dev->count_lock, NULL, MUTEX_DRIVER, (void *)pdev->intr_block);
+ mutex_init(&dev->event_lock, NULL, MUTEX_DRIVER, (void *)pdev->intr_block);
+ mutex_init(&dev->struct_mutex, NULL, MUTEX_DRIVER, NULL); //adaptive locks
+ mutex_init(&dev->ctxlist_mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&dev->irq_lock, NULL, MUTEX_DRIVER, (void *)pdev->intr_block);
+ mutex_init(&dev->track_lock, NULL, MUTEX_DRIVER, (void *)pdev->intr_block);
+ mutex_init(&dev->page_fault_lock, NULL, MUTEX_DRIVER, NULL);
+
+ dev->pdev = pdev;
+ dev->pci_device = pdev->device;
+ dev->pci_vendor = pdev->vendor;
+
+ idr_init(&dev->map_idr);
+
+ /* the DRM has 6 basic counters */
+ dev->counters = 6;
+ dev->types[0] = _DRM_STAT_LOCK;
+ dev->types[1] = _DRM_STAT_OPENS;
+ dev->types[2] = _DRM_STAT_CLOSES;
+ dev->types[3] = _DRM_STAT_IOCTLS;
+ dev->types[4] = _DRM_STAT_LOCKS;
+ dev->types[5] = _DRM_STAT_UNLOCKS;
+
+ dev->driver = driver;
+
+ retcode = drm_ctxbitmap_init(dev);
+ if (retcode) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto error_out_unreg;
+ }
+
+ if (driver->driver_features & DRIVER_GEM) {
+ retcode = drm_gem_init(dev);
+ if (retcode) {
+ DRM_ERROR("Cannot initialize graphics execution "
+ "manager (GEM)\n");
+ goto error_out_unreg;
+ }
+ }
+
+ dev->drm_wq = create_workqueue(dev->devinfo, "drm");
+	if (dev->drm_wq == NULL) {
+		DRM_ERROR("Failed to create drm workqueue.\n");
+		retcode = -ENOMEM;
+		goto error_out_unreg;
+	}
+
+ return 0;
+
+ error_out_unreg:
+ (void)drm_lastclose(dev);
+ return retcode;
+}
+
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param sec_minor structure to hold the assigned minor
+ * \return negative number on failure.
+ *
+ * Search for an empty entry, initialize it to the given parameters, and
+ * create the proc init entry via proc_init(). This routine assigns
+ * minor numbers to secondary heads of multi-headed cards.
+ */
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+{
+ struct drm_minor *new_minor;
+ int ret;
+ int minor_id;
+
+ DRM_DEBUG("\n");
+
+ minor_id = drm_minor_get_id(dev, type);
+ if (minor_id < 0)
+ return minor_id;
+
+ new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+ if (!new_minor) {
+ ret = -ENOMEM;
+ goto err_idr;
+ }
+
+ new_minor->type = type;
+ new_minor->dev = dev;
+ new_minor->index = minor_id;
+ INIT_LIST_HEAD(&new_minor->master_list);
+
+ (void) idr_replace(&drm_minors_idr, new_minor, minor_id);
+
+ if (type == DRM_MINOR_LEGACY)
+ (void) sprintf(new_minor->name, "drm%d", new_minor->index);
+ else if (type == DRM_MINOR_CONTROL)
+ (void) sprintf(new_minor->name, "controlD%d", new_minor->index);
+ else if (type == DRM_MINOR_RENDER)
+ (void) sprintf(new_minor->name, "renderD%d", new_minor->index);
+ else if (type == DRM_MINOR_VGATEXT)
+ (void) sprintf(new_minor->name, "gfx%d", new_minor->index - DRM_MINOR_ID_BASE_VGATEXT);
+ else if (type == DRM_MINOR_AGPMASTER)
+ (void) sprintf(new_minor->name, "agpmaster%d", new_minor->index - DRM_MINOR_ID_BASE_AGPMASTER);
+
+ idr_init(&new_minor->clone_idr);
+
+ ret = drm_sysfs_device_add(new_minor);
+ if (ret)
+ goto err_g2;
+ *minor = new_minor;
+
+ DRM_DEBUG("new minor assigned %d\n", minor_id);
+ return 0;
+
+
+err_g2:
+ kfree(new_minor, sizeof (*new_minor));
+err_idr:
+ (void) idr_remove(&drm_minors_idr, minor_id);
+ *minor = NULL;
+ return ret;
+}
+
+/**
+ * Register.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter-module "drm" information. If we are first
+ * then register the character device and inter-module information.
+ * Try and register; if we fail to register, back out previous work.
+ */
+int drm_get_dev(struct drm_device *dev, struct pci_dev *pdev,
+ struct drm_driver *driver, unsigned long driver_data)
+{
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ if ((ret = drm_fill_in_dev(dev, pdev, driver))) {
+ DRM_ERROR("DRM: Fill_in_dev failed");
+ goto err_g1;
+ }
+
+ if ((ret = drm_get_minor(dev, &dev->vgatext, DRM_MINOR_VGATEXT))) {
+		goto err_g1;
+ }
+
+ if (dev->driver->agp_support_detect)
+ dev->driver->agp_support_detect(dev, driver_data);
+
+ if (drm_core_has_AGP(dev)) {
+ if ((ret = drm_get_minor(dev, &dev->agpmaster, DRM_MINOR_AGPMASTER)))
+			goto err_g2;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g3;
+ }
+
+ if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+ goto err_g4;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, driver_data);
+ if (ret)
+ goto err_g5;
+ }
+
+ if (drm_init_kstats(dev)) {
+ DRM_ERROR("init kstats error");
+ ret = EFAULT;
+ goto err_g5;
+ }
+
+ cmn_err(CE_CONT, "!Initialized %s v%d.%d.%d Modified date %s",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date);
+
+ return 0;
+
+err_g5:
+	(void) drm_put_minor(&dev->primary);
+err_g4:
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		(void) drm_put_minor(&dev->control);
+err_g3:
+	if (drm_core_has_AGP(dev) && dev->agpmaster)
+		(void) drm_put_minor(&dev->agpmaster);
+err_g2:
+	(void) drm_put_minor(&dev->vgatext);
+err_g1:
+	return ret;
+}
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param sec_minor - structure to be released
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
+ *
+ */
+int drm_put_minor(struct drm_minor **minor_p)
+{
+ struct drm_minor *minor = *minor_p;
+
+ DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+ drm_sysfs_device_remove(minor);
+
+ (void) idr_remove(&drm_minors_idr, minor->index);
+
+ idr_destroy(&minor->clone_idr);
+
+ kfree(minor, sizeof (*minor));
+ *minor_p = NULL;
+ return 0;
+}
+
+/**
+ * Called via drm_exit() at module unload time or when pci device is
+ * unplugged.
+ *
+ * Cleans up the DRM device, calling drm_lastclose().
+ *
+ * \sa drm_init
+ */
+void drm_put_dev(struct drm_device *dev)
+{
+ struct drm_driver *driver;
+ struct drm_map_list *r_list, *list_temp;
+
+ DRM_DEBUG("\n");
+
+ if (!dev) {
+ DRM_ERROR("cleanup called no dev\n");
+ return;
+ }
+ driver = dev->driver;
+
+ (void) drm_lastclose(dev);
+
+ (void) destroy_workqueue(dev->drm_wq);
+
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+
+ if (drm_core_has_AGP(dev) && dev->agp) {
+ drm_agp_cleanup(dev);
+ kfree(dev->agp, sizeof(*dev->agp));
+ dev->agp = NULL;
+ }
+
+ drm_vblank_cleanup(dev);
+
+ list_for_each_entry_safe(r_list, list_temp, struct drm_map_list, &dev->maplist, head)
+ (void) drm_rmmap(dev, r_list->map);
+ idr_destroy(&dev->map_idr);
+
+ drm_ctxbitmap_cleanup(dev);
+
+ (void) drm_put_minor(&dev->vgatext);
+
+ if (drm_core_has_AGP(dev))
+ (void) drm_put_minor(&dev->agpmaster);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ (void) drm_put_minor(&dev->control);
+
+ if (driver->driver_features & DRIVER_GEM)
+ drm_gem_destroy(dev);
+
+ (void) drm_put_minor(&dev->primary);
+
+ mutex_destroy(&dev->irq_lock);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->struct_mutex);
+ mutex_destroy(&dev->event_lock);
+ mutex_destroy(&dev->count_lock);
+
+ drm_fini_kstats(dev);
+}
diff --git a/usr/src/uts/common/io/drm/drm_sunmod.c b/usr/src/uts/common/io/drm/drm_sunmod.c
new file mode 100644
index 0000000..2f69229
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sunmod.c
@@ -0,0 +1,1010 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Common misc module interfaces of DRM under Solaris
+ */
+
+/*
+ * This module calls into gfx and agpmaster misc modules respectively
+ * for generic graphics operations and AGP master device support.
+ */
+
+#include "drm_sunmod.h"
+#include <sys/modctl.h>
+#include <sys/kmem.h>
+#include <vm/seg_kmem.h>
+
+static struct modlmisc modlmisc = {
+ &mod_miscops, "DRM common interfaces"
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modlmisc, NULL
+};
+
+static drm_inst_list_t *drm_inst_head;
+static kmutex_t drm_inst_list_lock;
+
+static int drm_sun_open(dev_t *, int, int, cred_t *);
+static int drm_sun_close(dev_t, int, int, cred_t *);
+static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+static int drm_sun_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
+ size_t *, uint_t);
+
+/*
+ * devmap callbacks for AGP and PCI GART
+ */
+static int drm_devmap_map(devmap_cookie_t, dev_t,
+ uint_t, offset_t, size_t, void **);
+static int drm_devmap_dup(devmap_cookie_t, void *,
+ devmap_cookie_t, void **);
+static void drm_devmap_unmap(devmap_cookie_t, void *,
+ offset_t, size_t, devmap_cookie_t, void **, devmap_cookie_t, void **);
+
+static drm_inst_list_t *drm_supp_alloc_drv_entry(dev_info_t *);
+static drm_inst_state_t *drm_sup_devt_to_state(dev_t);
+static void drm_supp_free_drv_entry(dev_info_t *);
+
+static struct devmap_callback_ctl drm_devmap_callbacks = {
+ DEVMAP_OPS_REV, /* devmap_rev */
+ drm_devmap_map, /* devmap_map */
+ NULL, /* devmap_access */
+ drm_devmap_dup, /* devmap_dup */
+ drm_devmap_unmap /* devmap_unmap */
+};
+
+/*
+ * Common device operations structure for all DRM drivers
+ */
+struct cb_ops drm_cb_ops = {
+ drm_sun_open, /* cb_open */
+ drm_sun_close, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ drm_sun_ioctl, /* cb_ioctl */
+ drm_sun_devmap, /* cb_devmap */
+ nodev, /* cb_mmap */
+ NULL, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ 0, /* cb_stream */
+	D_NEW | D_MTSAFE | D_DEVMAP	/* cb_flag */
+};
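+
+/*
+ * drm_supp_register() below installs this shared cb_ops into each
+ * registering driver's dev_ops, so every DRM driver funnels open,
+ * close, ioctl and devmap through the drm_sun_* entry points above.
+ */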
+
+int
+_init(void)
+{
+ int error;
+
+ if ((error = mod_install(&modlinkage)) != 0) {
+ return (error);
+ }
+
+ /* initialize the instance list lock */
+ mutex_init(&drm_inst_list_lock, NULL, MUTEX_DRIVER, NULL);
+ return (0);
+}
+
+int
+_fini(void)
+{
+ int err;
+
+ if ((err = mod_remove(&modlinkage)) != 0)
+ return (err);
+
+ mutex_destroy(&drm_inst_list_lock);
+ return (0);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+void *
+drm_supp_register(dev_info_t *dip, drm_device_t *dp)
+{
+ int error;
+ char buf[80];
+ int instance = ddi_get_instance(dip);
+ ddi_acc_handle_t pci_cfg_handle;
+ agp_master_softc_t *agpm;
+ drm_inst_state_t *mstate;
+ drm_inst_list_t *entry;
+ gfxp_vgatext_softc_ptr_t gfxp;
+ struct dev_ops *devop;
+
+ ASSERT(dip != NULL);
+
+ entry = drm_supp_alloc_drv_entry(dip);
+ if (entry == NULL) {
+ cmn_err(CE_WARN, "drm_supp_register: failed to get softstate");
+ return (NULL);
+ }
+ mstate = &entry->disl_state;
+
+ /*
+ * DRM drivers are required to use common cb_ops
+ */
+ devop = ddi_get_driver(dip);
+ if (devop->devo_cb_ops != &drm_cb_ops) {
+ devop->devo_cb_ops = &drm_cb_ops;
+ }
+
+ /* Generic graphics initialization */
+ gfxp = gfxp_vgatext_softc_alloc();
+ error = gfxp_vgatext_attach(dip, DDI_ATTACH, gfxp);
+ if (error != DDI_SUCCESS) {
+		DRM_ERROR("drm_supp_register: failed to init gfx");
+ goto exit1;
+ }
+
+ /* create a minor node for common graphics ops */
+ (void) sprintf(buf, "%s%d", GFX_NAME, instance);
+ error = ddi_create_minor_node(dip, buf, S_IFCHR,
+ INST2NODE0(instance), DDI_NT_DISPLAY, NULL);
+ if (error != DDI_SUCCESS) {
+		DRM_ERROR("drm_supp_register: "
+ "failed to create minor node for gfx");
+ goto exit2;
+ }
+
+ /* setup mapping for later PCI config space access */
+ error = pci_config_setup(dip, &pci_cfg_handle);
+ if (error != DDI_SUCCESS) {
+		DRM_ERROR("drm_supp_register: "
+ "PCI configuration space setup failed");
+ goto exit2;
+ }
+
+ /* AGP master attach */
+ agpm = NULL;
+ if (dp->driver->use_agp) {
+		DRM_DEBUG("drm_supp_register: driver uses AGP\n");
+ error = agpmaster_attach(dip, &agpm,
+ pci_cfg_handle, INST2NODE1(instance));
+ if ((error != DDI_SUCCESS) && (dp->driver->require_agp)) {
+			DRM_ERROR("drm_supp_register: "
+ "AGP master support not available");
+ goto exit3;
+ }
+ }
+
+ mutex_enter(&mstate->mis_lock);
+ mstate->mis_major = ddi_driver_major(dip);
+ mstate->mis_dip = dip;
+ mstate->mis_gfxp = gfxp;
+ mstate->mis_agpm = agpm;
+ mstate->mis_cfg_hdl = pci_cfg_handle;
+ mstate->mis_devp = dp;
+ mutex_exit(&mstate->mis_lock);
+
+ /* create minor node for DRM access */
+ (void) sprintf(buf, "%s%d", DRM_DEVNODE, instance);
+ if (ddi_create_minor_node(dip, buf, S_IFCHR,
+ INST2NODE2(instance), DDI_NT_DISPLAY_DRM, 0)) {
+		DRM_ERROR("drm_supp_register: "
+		    "failed to create minor node for drm");
+ goto exit4;
+ }
+
+ return ((void *)mstate);
+
+exit4:
+ if ((dp->driver->use_agp) && agpm)
+ agpmaster_detach(&agpm);
+exit3:
+ pci_config_teardown(&pci_cfg_handle);
+exit2:
+ (void) gfxp_vgatext_detach(dip, DDI_DETACH, gfxp);
+exit1:
+ gfxp_vgatext_softc_free(gfxp);
+ drm_supp_free_drv_entry(dip);
+ ddi_remove_minor_node(dip, NULL);
+
+ return (NULL);
+}
+
+
+int
+drm_supp_unregister(void *handle)
+{
+ drm_inst_list_t *list;
+ drm_inst_state_t *mstate;
+
+ list = (drm_inst_list_t *)handle;
+ mstate = &list->disl_state;
+ mutex_enter(&mstate->mis_lock);
+
+ /* AGP master detach */
+ if (mstate->mis_agpm != NULL)
+ agpmaster_detach(&mstate->mis_agpm);
+
+ /* free PCI config access handle */
+ if (mstate->mis_cfg_hdl)
+ pci_config_teardown(&mstate->mis_cfg_hdl);
+
+ /* graphics misc module detach */
+ if (mstate->mis_gfxp) {
+ (void) gfxp_vgatext_detach(mstate->mis_dip, DDI_DETACH,
+ mstate->mis_gfxp);
+ gfxp_vgatext_softc_free(mstate->mis_gfxp);
+ }
+
+ mstate->mis_devp = NULL;
+
+ /* remove all minor nodes */
+ ddi_remove_minor_node(mstate->mis_dip, NULL);
+ mutex_exit(&mstate->mis_lock);
+ drm_supp_free_drv_entry(mstate->mis_dip);
+
+ return (DDI_SUCCESS);
+}
+
+
+/*ARGSUSED*/
+static int
+drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+{
+ drm_inst_state_t *mstate;
+ drm_cminor_t *mp, *newp;
+ drm_device_t *dp;
+ minor_t minor;
+ int newminor;
+ int instance;
+ int err;
+
+ mstate = drm_sup_devt_to_state(*devp);
+ /*
+	 * Return ENXIO for deferred attach so that the system can
+ * attach us again.
+ */
+ if (mstate == NULL)
+ return (ENXIO);
+
+ /*
+	 * The least significant 15 bits are used for the minor number, and
+	 * the next 3 bits are used for the instance number. All minor numbers
+ * are used as follows:
+ * 0 -- gfx
+ * 1 -- agpmaster
+ * 2 -- drm
+ * (3, MAX_CLONE_MINOR) -- drm minor node for clone open.
+ */
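+	/*
+	 * Example (hypothetical numbers): with NBITSMNODE == 15, a dev_t
+	 * minor of 0x8002 decodes as instance 1 (0x8002 >> NBITSMNODE)
+	 * and minor node 2 (0x8002 & 0x7fff), i.e. the drm node of
+	 * instance 1.
+	 */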
+ minor = DEV2MINOR(*devp);
+ instance = DEV2INST(*devp);
+ ASSERT(minor <= MAX_CLONE_MINOR);
+
+ /*
+	 * No operations for VGA & AGP master devices; always return OK.
+ */
+ if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
+ return (0);
+
+ /*
+ * From here, we start to process drm
+ */
+
+ dp = mstate->mis_devp;
+ if (!dp)
+ return (ENXIO);
+
+ /*
+	 * The drm driver implements a software lock to serialize access
+	 * to graphics hardware at per-process granularity. Before
+	 * operating on graphics hardware, all clients, including the
+	 * kernel and applications, must acquire this lock via the
+	 * DRM_IOCTL_LOCK ioctl, and release it via DRM_IOCTL_UNLOCK
+	 * after finishing their operations. The drm driver grants r/w
+	 * permission to the process which acquires this lock (the
+	 * kernel is assumed to have process ID 0).
+	 *
+	 * A process might be terminated without releasing the drm lock;
+	 * in this case, the drm driver is responsible for clearing the
+	 * holding. To be informed of process exit, the drm driver uses
+	 * clone open to guarantee that each call to open(9e) has one
+	 * corresponding call to close(9e). In most cases, a process
+	 * closes drm during process termination, so the drm driver gets
+	 * a chance to release the drm lock.
+	 *
+	 * In fact, a driver cannot know exactly when a process exits.
+	 * Clone open doesn't address this issue completely: because of
+	 * inheritance, child processes inherit file descriptors from
+	 * their parent. As a result, if the parent exits before its
+	 * children, the drm close(9e) entry point won't be called until
+	 * all of its children terminate.
+	 *
+	 * Another issue brought up by inheritance is that the PID of
+	 * the process calling the drm close() entry point may not match
+	 * the one that called open(). The per-process struct is
+	 * allocated when a process first open()s drm, and released when
+	 * the process last close()s drm. Since open() and close() may
+	 * come from different processes, the PID cannot be used as the
+	 * key to look up the per-process struct. So, we associate a
+	 * minor number with the per-process struct during open(), and
+	 * find the corresponding struct via that minor number when
+	 * close() is called.
+ */
+ newp = kmem_zalloc(sizeof (drm_cminor_t), KM_SLEEP);
+ mutex_enter(&dp->dev_lock);
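+	/* Scan for the first clone minor number not already in use. */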
+	for (newminor = DRM_MIN_CLONEMINOR; newminor < MAX_CLONE_MINOR;
+	    newminor++) {
+ TAILQ_FOREACH(mp, &dp->minordevs, link) {
+ if (mp->minor == newminor)
+ break;
+ }
+ if (mp == NULL)
+ goto gotminor;
+ }
+
+ mutex_exit(&dp->dev_lock);
+ (void) kmem_free(newp, sizeof (drm_cminor_t));
+ return (EMFILE);
+
+gotminor:
+ TAILQ_INSERT_TAIL(&dp->minordevs, newp, link);
+ newp->minor = newminor;
+ mutex_exit(&dp->dev_lock);
+ err = drm_open(dp, newp, flag, otyp, credp);
+ if (err) {
+ mutex_enter(&dp->dev_lock);
+ TAILQ_REMOVE(&dp->minordevs, newp, link);
+ (void) kmem_free(newp, sizeof (drm_cminor_t));
+ mutex_exit(&dp->dev_lock);
+
+ return (err);
+ }
+
+ /* return a clone minor */
+ newminor = newminor | (instance << NBITSMNODE);
+ *devp = makedevice(getmajor(*devp), newminor);
+ return (err);
+}
+
+/*ARGSUSED*/
+static int
+drm_sun_close(dev_t dev, int flag, int otyp, cred_t *credp)
+{
+ drm_inst_state_t *mstate;
+ drm_device_t *dp;
+ minor_t minor;
+ int ret;
+
+ mstate = drm_sup_devt_to_state(dev);
+ if (mstate == NULL)
+ return (EBADF);
+
+ minor = DEV2MINOR(dev);
+ ASSERT(minor <= MAX_CLONE_MINOR);
+ if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
+ return (0);
+
+ dp = mstate->mis_devp;
+ if (dp == NULL) {
+ DRM_ERROR("drm_sun_close: NULL soft state");
+ return (ENXIO);
+ }
+
+ ret = drm_close(dp, minor, flag, otyp, credp);
+
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *credp, int *rvalp)
+{
+ extern drm_ioctl_desc_t drm_ioctls[];
+
+ drm_inst_state_t *mstate;
+ drm_device_t *dp;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+ drm_file_t *fpriv;
+ minor_t minor;
+ int retval;
+ int nr;
+
+ if (cmd == VIS_GETIDENTIFIER) {
+ if (ddi_copyout(&text_ident, (void *)arg,
+ sizeof (struct vis_identifier), mode))
+ return (EFAULT);
+ }
+
+ mstate = drm_sup_devt_to_state(dev);
+ if (mstate == NULL) {
+ return (EIO);
+ }
+
+ minor = DEV2MINOR(dev);
+ ASSERT(minor <= MAX_CLONE_MINOR);
+ switch (minor) {
+ case GFX_MINOR:
+ retval = gfxp_vgatext_ioctl(dev, cmd, arg,
+ mode, credp, rvalp, mstate->mis_gfxp);
+ return (retval);
+
+ case AGPMASTER_MINOR:
+ retval = agpmaster_ioctl(dev, cmd, arg, mode,
+ credp, rvalp, mstate->mis_agpm);
+ return (retval);
+
+ case DRM_MINOR:
+ default: /* DRM cloning minor nodes */
+ break;
+ }
+
+ dp = mstate->mis_devp;
+ ASSERT(dp != NULL);
+
+ nr = DRM_IOCTL_NR(cmd);
+ ioctl = &drm_ioctls[nr];
+ atomic_inc_32(&dp->counts[_DRM_STAT_IOCTLS]);
+
+ /* It's not a core DRM ioctl, try driver-specific. */
+ if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
+ /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
+ nr -= DRM_COMMAND_BASE;
+ if (nr > dp->driver->max_driver_ioctl) {
+ DRM_ERROR("Bad driver ioctl number, 0x%x (of 0x%x)",
+ nr, dp->driver->max_driver_ioctl);
+ return (EINVAL);
+ }
+ ioctl = &dp->driver->driver_ioctls[nr];
+ }
+
+ func = ioctl->func;
+ if (func == NULL) {
+ return (ENOTSUP);
+ }
+
+ mutex_enter(&dp->dev_lock);
+ fpriv = drm_find_file_by_proc(dp, credp);
+ mutex_exit(&dp->dev_lock);
+ if (fpriv == NULL) {
+ DRM_ERROR("drm_sun_ioctl : can't find authenticator");
+ return (EACCES);
+ }
+
+ if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
+ ((ioctl->flags & DRM_AUTH) && !fpriv->authenticated) ||
+ ((ioctl->flags & DRM_MASTER) && !fpriv->master))
+ return (EACCES);
+
+ fpriv->dev = dev;
+ fpriv->credp = credp;
+
+ retval = func(dp, arg, fpriv, mode);
+
+ return (retval);
+}
+
+/*ARGSUSED*/
+static int
+drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
+ size_t len, size_t *maplen, uint_t model)
+{
+ extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);
+
+ drm_inst_state_t *mstate;
+ drm_device_t *dp;
+ ddi_umem_cookie_t cookie;
+ drm_local_map_t *map = NULL;
+ unsigned long aperbase;
+ u_offset_t handle;
+ offset_t koff;
+ caddr_t kva;
+ minor_t minor;
+ size_t length;
+ int ret;
+
+ static ddi_device_acc_attr_t dev_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC,
+ };
+ static ddi_device_acc_attr_t gem_dev_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_MERGING_OK_ACC
+ };
+
+ mstate = drm_sup_devt_to_state(dev);
+ if (mstate == NULL)
+ return (ENXIO);
+
+ minor = DEV2MINOR(dev);
+ switch (minor) {
+ case GFX_MINOR:
+ ret = gfxp_vgatext_devmap(dev, dhp, offset, len, maplen, model,
+ mstate->mis_gfxp);
+ return (ret);
+
+ case AGPMASTER_MINOR:
+ return (ENOTSUP);
+
+ case DRM_MINOR:
+ break;
+
+ default:
+ /* DRM cloning nodes */
+ if (minor > MAX_CLONE_MINOR)
+ return (EBADF);
+ break;
+ }
+
+
+ dp = mstate->mis_devp;
+ if (dp == NULL) {
+ DRM_ERROR("drm_sun_devmap: NULL soft state");
+ return (EINVAL);
+ }
+
+ mutex_enter(&dp->dev_lock);
+
+ if (dp->driver->use_gem == 1) {
+ struct idr_list *entry;
+ drm_cminor_t *mp;
+
+ mp = drm_find_file_by_minor(dp, minor);
+ if (!mp) {
+ mutex_exit(&dp->dev_lock);
+ DRM_ERROR("drm_sun_devmap: can't find authenticator");
+ return (EACCES);
+ }
+
+ spin_lock(&dp->struct_mutex);
+ idr_list_for_each(entry, &(mp->fpriv->object_idr)) {
+ if ((uintptr_t)entry->obj == (u_offset_t)offset) {
+ map = entry->obj->map;
+ goto goon;
+ }
+ }
+goon:
+ spin_unlock(&dp->struct_mutex);
+ }
+
+ if (map == NULL) {
+ /*
+		 * We will address the 32-bit application on 64-bit kernel
+		 * issue later; for now, we just use the low 32 bits.
+ */
+ handle = (u_offset_t)offset;
+ handle &= 0xffffffff;
+
+ TAILQ_FOREACH(map, &dp->maplist, link) {
+ if (handle ==
+ ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
+ break;
+ }
+
+ /*
+		 * Temporary workaround: the offset is a physical address
+		 * for registers and the framebuffer, but a kernel virtual
+		 * address for other map types. Maybe we will use a hash
+		 * table to solve this issue later.
+ */
+ if (map == NULL) {
+ TAILQ_FOREACH(map, &dp->maplist, link) {
+ if (handle == (map->offset & 0xffffffff))
+ break;
+ }
+ }
+ }
+
+ if (map == NULL) {
+ u_offset_t tmp;
+
+ mutex_exit(&dp->dev_lock);
+ cmn_err(CE_WARN, "Can't find map, offset=0x%llx, len=%x\n",
+ offset, (int)len);
+ cmn_err(CE_WARN, "Current mapping:\n");
+ TAILQ_FOREACH(map, &dp->maplist, link) {
+ tmp = (u_offset_t)((uintptr_t)map->handle) & 0xffffffff;
+ cmn_err(CE_WARN, "map(handle=0x%p, size=0x%lx,type=%d,"
+ "offset=0x%lx), handle=%llx, tmp=%lld", map->handle,
+ map->size, map->type, map->offset, handle, tmp);
+ }
+ return (-1);
+ }
+ if (map->flags & _DRM_RESTRICTED) {
+ mutex_exit(&dp->dev_lock);
+ cmn_err(CE_WARN, "restricted map\n");
+ return (-1);
+ }
+
+ mutex_exit(&dp->dev_lock);
+ switch (map->type) {
+ case _DRM_FRAME_BUFFER:
+ case _DRM_REGISTERS:
+ {
+ int regno;
+ off_t regoff;
+
+ regno = drm_get_pci_index_reg(dp->dip,
+ map->offset, (uint_t)len, &regoff);
+ if (regno < 0) {
+ DRM_ERROR("devmap: failed to get register"
+ " offset=0x%llx, len=0x%x", handle, len);
+ return (EINVAL);
+ }
+
+ ret = devmap_devmem_setup(dhp, dp->dip, NULL,
+ regno, (offset_t)regoff, len, PROT_ALL,
+ 0, &dev_attr);
+ if (ret != 0) {
+ *maplen = 0;
+ DRM_ERROR("devmap: failed, regno=%d,type=%d,"
+ " handle=0x%x, offset=0x%llx, len=0x%x",
+ regno, map->type, handle, offset, len);
+ return (ret);
+ }
+ *maplen = len;
+ return (ret);
+ }
+
+ case _DRM_SHM:
+ if (map->drm_umem_cookie == NULL)
+ return (EINVAL);
+ length = ptob(btopr(map->size));
+ ret = devmap_umem_setup(dhp, dp->dip, NULL,
+ map->drm_umem_cookie, 0, length,
+ PROT_ALL, IOMEM_DATA_CACHED, NULL);
+ if (ret != 0) {
+ *maplen = 0;
+ return (ret);
+ }
+ *maplen = length;
+
+ return (DDI_SUCCESS);
+
+ case _DRM_AGP:
+ if (dp->agp == NULL) {
+			cmn_err(CE_WARN, "drm_sun_devmap: attempted to mmap "
+			    "AGP memory before AGP support is enabled");
+ return (DDI_FAILURE);
+ }
+
+ aperbase = dp->agp->base;
+ koff = map->offset - aperbase;
+ length = ptob(btopr(len));
+ kva = map->dev_addr;
+ cookie = gfxp_umem_cookie_init(kva, length);
+ if (cookie == NULL) {
+			cmn_err(CE_WARN, "devmap: failed to get umem_cookie");
+ return (DDI_FAILURE);
+ }
+
+ if ((ret = devmap_umem_setup(dhp, dp->dip,
+ &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
+ IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr)) < 0) {
+ gfxp_umem_cookie_destroy(cookie);
+			cmn_err(CE_WARN, "devmap: failed, retval=%d", ret);
+ return (DDI_FAILURE);
+ }
+ *maplen = length;
+ break;
+
+ case _DRM_SCATTER_GATHER:
+ koff = map->offset - (unsigned long)(caddr_t)dp->sg->virtual;
+ kva = map->dev_addr + koff;
+ length = ptob(btopr(len));
+ if (length > map->size) {
+			cmn_err(CE_WARN, "offset=0x%lx, virtual=0x%p, "
+			    "mapsize=0x%lx, len=0x%lx", map->offset,
+			    dp->sg->virtual, map->size, len);
+ return (DDI_FAILURE);
+ }
+ cookie = gfxp_umem_cookie_init(kva, length);
+ if (cookie == NULL) {
+			cmn_err(CE_WARN, "devmap: failed to get umem_cookie");
+ return (DDI_FAILURE);
+ }
+ ret = devmap_umem_setup(dhp, dp->dip,
+ &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
+ IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
+ if (ret != 0) {
+			cmn_err(CE_WARN, "drm_sun_devmap: umem_setup failed");
+ gfxp_umem_cookie_destroy(cookie);
+ return (DDI_FAILURE);
+ }
+ *maplen = length;
+ break;
+
+ case _DRM_TTM:
+ if (map->drm_umem_cookie == NULL)
+ return (EINVAL);
+
+		if ((ret = gfxp_devmap_umem_setup(dhp, dp->dip,
+		    NULL, map->drm_umem_cookie, 0, map->size, PROT_ALL,
+		    IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP,
+		    &gem_dev_attr)) != 0) {
+			cmn_err(CE_WARN, "devmap: failed, retval=%d", ret);
+ return (DDI_FAILURE);
+ }
+ *maplen = map->size;
+ return (DDI_SUCCESS);
+
+ default:
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+
+}
+
+/*ARGSUSED*/
+static int
+drm_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags,
+ offset_t offset, size_t len, void **new_priv)
+{
+ devmap_handle_t *dhp;
+ drm_inst_state_t *statep;
+ struct ddi_umem_cookie *cp;
+
+ statep = drm_sup_devt_to_state(dev);
+ ASSERT(statep != NULL);
+
+ /*
+ * This driver only supports MAP_SHARED,
+ * and doesn't support MAP_PRIVATE
+ */
+ if (flags & MAP_PRIVATE) {
+ cmn_err(CE_WARN, "!DRM driver doesn't support MAP_PRIVATE");
+ return (EINVAL);
+ }
+
+ mutex_enter(&statep->dis_ctxlock);
+ dhp = (devmap_handle_t *)dhc;
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+ cp->cook_refcnt = 1;
+ mutex_exit(&statep->dis_ctxlock);
+ *new_priv = statep;
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static void
+drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
+ devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
+ void **new_pvtp2)
+{
+ devmap_handle_t *dhp;
+ devmap_handle_t *ndhp;
+ drm_inst_state_t *statep;
+ struct ddi_umem_cookie *cp;
+ struct ddi_umem_cookie *ncp;
+
+ dhp = (devmap_handle_t *)dhc;
+ statep = (drm_inst_state_t *)pvtp;
+
+ mutex_enter(&statep->dis_ctxlock);
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
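+	/*
+	 * A partial unmap may split the original mapping into up to two
+	 * new handles; each survivor shares the same umem cookie, so the
+	 * reference count is bumped for every new handle and dropped for
+	 * the handle going away.
+	 */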
+ if (new_dhp1 != NULL) {
+ ndhp = (devmap_handle_t *)new_dhp1;
+ ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
+		ncp->cook_refcnt++;
+ *new_pvtp1 = statep;
+ ASSERT(ncp == cp);
+ }
+
+ if (new_dhp2 != NULL) {
+ ndhp = (devmap_handle_t *)new_dhp2;
+ ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
+		ncp->cook_refcnt++;
+ *new_pvtp2 = statep;
+ ASSERT(ncp == cp);
+ }
+
+	cp->cook_refcnt--;
+ if (cp->cook_refcnt == 0) {
+ gfxp_umem_cookie_destroy(dhp->dh_cookie);
+ dhp->dh_cookie = NULL;
+ }
+ mutex_exit(&statep->dis_ctxlock);
+}
+
+
+/*ARGSUSED*/
+static int
+drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
+ void **new_pvtp)
+{
+ devmap_handle_t *dhp;
+ drm_inst_state_t *statep;
+ struct ddi_umem_cookie *cp;
+
+ statep = (drm_inst_state_t *)pvtp;
+ mutex_enter(&statep->dis_ctxlock);
+ dhp = (devmap_handle_t *)dhc;
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+	cp->cook_refcnt++;
+ mutex_exit(&statep->dis_ctxlock);
+ *new_pvtp = statep;
+
+ return (0);
+}
+
+int
+drm_dev_to_instance(dev_t dev)
+{
+ return (DEV2INST(dev));
+}
+
+/*
+ * drm_supp_alloc_drv_entry()
+ *
+ * Description:
+ * Create a DRM entry and add it into the instance list (drm_inst_head).
+ *	Note that we don't allow duplicated entries.
+ */
+static drm_inst_list_t *
+drm_supp_alloc_drv_entry(dev_info_t *dip)
+{
+ drm_inst_list_t **plist;
+ drm_inst_list_t *list;
+ drm_inst_list_t *entry;
+
+ /* protect the driver list */
+ mutex_enter(&drm_inst_list_lock);
+ plist = &drm_inst_head;
+ list = *plist;
+ while (list) {
+ if (list->disl_state.mis_dip == dip) {
+ mutex_exit(&drm_inst_list_lock);
+ cmn_err(CE_WARN, "%s%d already registered",
+ ddi_driver_name(dip), ddi_get_instance(dip));
+ return (NULL);
+ }
+ plist = &list->disl_next;
+ list = list->disl_next;
+ }
+
+ /* "dip" is not registered, create new one and add to list */
+ entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
+ *plist = entry;
+ entry->disl_state.mis_dip = dip;
+ mutex_init(&entry->disl_state.mis_lock, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&entry->disl_state.dis_ctxlock, NULL, MUTEX_DRIVER, NULL);
+ mutex_exit(&drm_inst_list_lock);
+
+ return (entry);
+
+} /* drm_supp_alloc_drv_entry */
+
+/*
+ * drm_supp_free_drv_entry()
+ */
+static void
+drm_supp_free_drv_entry(dev_info_t *dip)
+{
+ drm_inst_list_t *list;
+ drm_inst_list_t **plist;
+ drm_inst_state_t *mstate;
+
+ /* protect the driver list */
+ mutex_enter(&drm_inst_list_lock);
+ plist = &drm_inst_head;
+ list = *plist;
+ while (list) {
+ if (list->disl_state.mis_dip == dip) {
+ *plist = list->disl_next;
+ mstate = &list->disl_state;
+ mutex_destroy(&mstate->mis_lock);
+ mutex_destroy(&mstate->dis_ctxlock);
+ kmem_free(list, sizeof (*list));
+ mutex_exit(&drm_inst_list_lock);
+ return;
+ }
+ plist = &list->disl_next;
+ list = list->disl_next;
+ }
+ mutex_exit(&drm_inst_list_lock);
+
+} /* drm_supp_free_drv_entry() */
+
+/*
+ * drm_sup_devt_to_state()
+ *
+ * Description:
+ *	Get the soft state of a DRM instance by device number
+ */
+static drm_inst_state_t *
+drm_sup_devt_to_state(dev_t dev)
+{
+ drm_inst_list_t *list;
+ drm_inst_state_t *mstate;
+ major_t major = getmajor(dev);
+ int instance = DEV2INST(dev);
+
+ mutex_enter(&drm_inst_list_lock);
+ list = drm_inst_head;
+ while (list) {
+ mstate = &list->disl_state;
+ mutex_enter(&mstate->mis_lock);
+
+ if ((mstate->mis_major == major) &&
+ (ddi_get_instance(mstate->mis_dip) == instance)) {
+ mutex_exit(&mstate->mis_lock);
+ mutex_exit(&drm_inst_list_lock);
+ return (mstate);
+ }
+
+ list = list->disl_next;
+ mutex_exit(&mstate->mis_lock);
+ }
+
+ mutex_exit(&drm_inst_list_lock);
+ return (NULL);
+
+} /* drm_sup_devt_to_state() */
+
+int
+drm_supp_get_irq(void *handle)
+{
+ drm_inst_list_t *list;
+ drm_inst_state_t *mstate;
+ int irq;
+
+ list = (drm_inst_list_t *)handle;
+ mstate = &list->disl_state;
+ ASSERT(mstate != NULL);
+ irq = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_ILINE);
+ return (irq);
+}
+
+int
+drm_supp_device_capability(void *handle, int capid)
+{
+ drm_inst_list_t *list;
+ drm_inst_state_t *mstate;
+ uint8_t cap = 0;
+ uint16_t caps_ptr;
+
+ list = (drm_inst_list_t *)handle;
+ mstate = &list->disl_state;
+ ASSERT(mstate != NULL);
+
+	/* does the device have a capabilities list? */
+	if ((pci_config_get16(mstate->mis_cfg_hdl, PCI_CONF_STAT) &
+	    PCI_CONF_CAP_MASK) == 0)
+		return (0);
+
+ caps_ptr = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_CAP_PTR);
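+	/* walk the capability list until the requested id is found */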
+ while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
+ cap = pci_config_get32(mstate->mis_cfg_hdl, caps_ptr);
+ if ((cap & PCI_CONF_CAPID_MASK) == capid)
+ return (cap);
+ caps_ptr = pci_config_get8(mstate->mis_cfg_hdl,
+ caps_ptr + PCI_CAP_NEXT_PTR);
+ }
+
+ return (0);
+}
diff --git a/usr/src/uts/common/io/drm/drm_sunmod.h b/usr/src/uts/common/io/drm/drm_sunmod.h
new file mode 100644
index 0000000..32cd5c0
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sunmod.h
@@ -0,0 +1,160 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Common misc module interfaces of DRM under Solaris
+ */
+
+/*
+ * I915 DRM Driver for Solaris
+ *
+ * This driver provides hardware 3D acceleration support for Intel
+ * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
+ * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
+ * means the kernel device driver in DRI.
+ *
+ * The I915 driver is a device dependent driver only; it depends on a misc
+ * module named drm for generic DRM operations.
+ *
+ * This driver also calls into gfx and agpmaster misc modules respectively for
+ * generic graphics operations and AGP master device support.
+ */
+
+#ifndef _SYS_DRM_SUNMOD_H_
+#define _SYS_DRM_SUNMOD_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/visual_io.h>
+#include <sys/font.h>
+#include <sys/fbio.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/open.h>
+#include <sys/modctl.h>
+#include <sys/vgareg.h>
+#include <sys/vgasubr.h>
+#include <sys/pci.h>
+#include <sys/kd.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/sunldi.h>
+#include <sys/mkdev.h>
+#include <gfx_private.h>
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpmaster_io.h>
+#include "drmP.h"
+#include <sys/modctl.h>
+
+/*
+ * The dev_t of this driver consists of:
+ *
+ * major number with NBITSMAJOR bits
+ * instance node number with NBITSINST bits
+ * minor node number with NBITSMINOR - NBITSINST bits
+ *
+ * Each instance has at most 2^(NBITSMINOR - NBITSINST) minor nodes; the
+ * first three are:
+ * 0: gfx<instance number>, graphics common node
+ * 1: agpmaster<instance number>, agpmaster node
+ * 2: drm<instance number>, drm node
+ */
+#define GFX_MINOR 0
+#define AGPMASTER_MINOR 1
+#define DRM_MINOR 2
+#define DRM_MIN_CLONEMINOR 3
+
+/*
+ * Number of bits occupied by instance number in dev_t, currently maximum 8
+ * instances are supported.
+ */
+#define NBITSINST 3
+
+/* Number of bits occupied in dev_t by minor node */
+#define NBITSMNODE (18 - NBITSINST)
+
+/*
+ * DRM use a "cloning" minor node mechanism to release lock on every close(2),
+ * thus there will be a minor node for every open(2) operation. Here we give
+ * the maximum DRM cloning minor node number.
+ */
+#define MAX_CLONE_MINOR	((1 << NBITSMNODE) - 1)
+#define DEV2MINOR(dev) (getminor(dev) & ((1 << (NBITSMNODE)) - 1))
+#define DEV2INST(dev) (getminor(dev) >> NBITSMNODE)
+#define INST2NODE0(inst) ((inst) << NBITSMNODE)
+#define INST2NODE1(inst) (((inst) << NBITSMNODE) + AGPMASTER_MINOR)
+#define INST2NODE2(inst) (((inst) << NBITSMNODE) + DRM_MINOR)
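+
+/*
+ * Example (hypothetical values): for instance 1, INST2NODE2(1) yields
+ * minor number (1 << NBITSMNODE) + DRM_MINOR == 0x8002; DEV2INST() and
+ * DEV2MINOR() recover 1 and DRM_MINOR from a dev_t carrying that minor.
+ */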
+
+/* graphics name for the common graphics minor node */
+#define GFX_NAME "gfx"
+
+
+/*
+ * softstate for DRM module
+ */
+typedef struct drm_instance_state {
+ kmutex_t mis_lock;
+ kmutex_t dis_ctxlock;
+ major_t mis_major;
+ dev_info_t *mis_dip;
+ drm_device_t *mis_devp;
+ ddi_acc_handle_t mis_cfg_hdl;
+ agp_master_softc_t *mis_agpm; /* agpmaster softstate ptr */
+ gfxp_vgatext_softc_ptr_t mis_gfxp; /* gfx softstate */
+} drm_inst_state_t;
+
+
+struct drm_inst_state_list {
+ drm_inst_state_t disl_state;
+ struct drm_inst_state_list *disl_next;
+
+};
+typedef struct drm_inst_state_list drm_inst_list_t;
+
+
+/* Identifier of this driver */
+static struct vis_identifier text_ident = { "SUNWdrm" };
+static int drm_sun_open(dev_t *, int, int, cred_t *);
+static int drm_sun_close(dev_t, int, int, cred_t *);
+static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+static int drm_sun_devmap(dev_t, devmap_cookie_t,
+ offset_t, size_t, size_t *, uint_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_DRM_SUNMOD_H_ */
diff --git a/usr/src/uts/common/io/drm/queue.h b/usr/src/uts/common/io/drm/queue.h
new file mode 100644
index 0000000..4994209
--- /dev/null
+++ b/usr/src/uts/common/io/drm/queue.h
@@ -0,0 +1,585 @@
+/* BEGIN CSTYLED */
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD: /repoman/r/ncvs/src/sys/sys/queue.h,v 1.66 2006/05/26 18:17:53 emaste Exp $
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ * SLIST LIST STAILQ TAILQ
+ * _HEAD + + + +
+ * _HEAD_INITIALIZER + + + +
+ * _ENTRY + + + +
+ * _INIT + + + +
+ * _EMPTY + + + +
+ * _FIRST + + + +
+ * _NEXT + + + +
+ * _PREV - - - +
+ * _LAST - - + +
+ * _FOREACH + + + +
+ * _FOREACH_SAFE + + + +
+ * _FOREACH_REVERSE - - - +
+ * _FOREACH_REVERSE_SAFE - - - +
+ * _INSERT_HEAD + + + +
+ * _INSERT_BEFORE - + - +
+ * _INSERT_AFTER + + + +
+ * _INSERT_TAIL - - + +
+ * _CONCAT - - + +
+ * _REMOVE_HEAD + - + -
+ * _REMOVE + + + +
+ *
+ */
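+
+/*
+ * Usage sketch (hypothetical types): a tail queue of "struct foo" is
+ * declared with TAILQ_HEAD(foohead, foo) and a TAILQ_ENTRY(foo) field,
+ * say "link", inside struct foo. After TAILQ_INIT(&head), elements are
+ * appended with TAILQ_INSERT_TAIL(&head, elm, link) and visited with
+ * TAILQ_FOREACH(var, &head, link); TAILQ_FOREACH_SAFE additionally
+ * tolerates removal of the current element during traversal.
+ */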
+#ifdef QUEUE_MACRO_DEBUG
+/* Store the last 2 places the queue element or head was altered */
+struct qm_trace {
+ char * lastfile;
+ int lastline;
+ char * prevfile;
+ int prevline;
+};
+
+#define TRACEBUF struct qm_trace trace;
+#define TRASHIT(x) do {(x) = (void *)-1;} while (*"\0")
+
+#define QMD_TRACE_HEAD(head) do { \
+ (head)->trace.prevline = (head)->trace.lastline; \
+ (head)->trace.prevfile = (head)->trace.lastfile; \
+ (head)->trace.lastline = __LINE__; \
+ (head)->trace.lastfile = __FILE__; \
+} while (*"\0")
+
+#define QMD_TRACE_ELEM(elem) do { \
+ (elem)->trace.prevline = (elem)->trace.lastline; \
+ (elem)->trace.prevfile = (elem)->trace.lastfile; \
+ (elem)->trace.lastline = __LINE__; \
+ (elem)->trace.lastfile = __FILE__; \
+} while (*"\0")
+
+#else
+#define QMD_TRACE_ELEM(elem)
+#define QMD_TRACE_HEAD(head)
+#define TRACEBUF
+#define TRASHIT(x)
+#endif /* QUEUE_MACRO_DEBUG */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+
+#define SLIST_FIRST(head) ((head)->slh_first)
+
+#define SLIST_FOREACH(var, head, field) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var); \
+ (var) = SLIST_NEXT((var), field))
+
+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+ for ((varp) = &SLIST_FIRST((head)); \
+ ((var) = *(varp)) != NULL; \
+ (varp) = &SLIST_NEXT((var), field))
+
+#define SLIST_INIT(head) do { \
+ SLIST_FIRST((head)) = NULL; \
+} while (*"\0")
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
+ SLIST_NEXT((slistelm), field) = (elm); \
+} while (*"\0")
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
+ SLIST_FIRST((head)) = (elm); \
+} while (*"\0")
+
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ if (SLIST_FIRST((head)) == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = SLIST_FIRST((head)); \
+ while (SLIST_NEXT(curelm, field) != (elm)) \
+ curelm = SLIST_NEXT(curelm, field); \
+ SLIST_NEXT(curelm, field) = \
+ SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
+ } \
+ TRASHIT((elm)->field.sle_next); \
+} while (*"\0")
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
+} while (*"\0")
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first;/* first element */ \
+ struct type **stqh_last;/* addr of last next element */ \
+}
+
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_CONCAT(head1, head2) do { \
+ if (!STAILQ_EMPTY((head2))) { \
+ *(head1)->stqh_last = (head2)->stqh_first; \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_INIT((head2)); \
+ } \
+} while (*"\0")
+
+#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
+
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+
+#define STAILQ_FOREACH(var, head, field) \
+	for ((var) = STAILQ_FIRST((head));				\
+ (var); \
+ (var) = STAILQ_NEXT((var), field))
+
+
+#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = STAILQ_FIRST((head)); \
+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define STAILQ_INIT(head) do { \
+ STAILQ_FIRST((head)) = NULL; \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (*"\0")
+
+#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_NEXT((tqelm), field) = (elm); \
+} while (*"\0")
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_FIRST((head)) = (elm); \
+} while (*"\0")
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ STAILQ_NEXT((elm), field) = NULL; \
+ *(head)->stqh_last = (elm); \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (*"\0")
+
+#define STAILQ_LAST(head, type, field) \
+ (STAILQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->stqh_last) - __offsetof(struct type, field))))
+
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ if (STAILQ_FIRST((head)) == (elm)) { \
+ STAILQ_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = STAILQ_FIRST((head)); \
+ while (STAILQ_NEXT(curelm, field) != (elm)) \
+ curelm = STAILQ_NEXT(curelm, field); \
+ if ((STAILQ_NEXT(curelm, field) = \
+ STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((curelm), field);\
+ } \
+ TRASHIT((elm)->field.stqe_next); \
+} while (*"\0")
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if ((STAILQ_FIRST((head)) = \
+ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (*"\0")
+
+#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
+ if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (*"\0")
+
+/*
+ * List declarations.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+
+#if (defined(_KERNEL) && defined(INVARIANTS))
+#define QMD_LIST_CHECK_HEAD(head, field) do { \
+ if (LIST_FIRST((head)) != NULL && \
+ LIST_FIRST((head))->field.le_prev != \
+ &LIST_FIRST((head))) \
+ panic("Bad list head %p first->prev != head", (head)); \
+} while (*"\0")
+
+#define QMD_LIST_CHECK_NEXT(elm, field) do { \
+ if (LIST_NEXT((elm), field) != NULL && \
+ LIST_NEXT((elm), field)->field.le_prev != \
+ &((elm)->field.le_next)) \
+ panic("Bad link elm %p next->prev != elm", (elm)); \
+} while (*"\0")
+
+#define QMD_LIST_CHECK_PREV(elm, field) do { \
+ if (*(elm)->field.le_prev != (elm)) \
+ panic("Bad link elm %p prev->next != elm", (elm)); \
+} while (*"\0")
+#else
+#define QMD_LIST_CHECK_HEAD(head, field)
+#define QMD_LIST_CHECK_NEXT(elm, field)
+#define QMD_LIST_CHECK_PREV(elm, field)
+#endif /* (_KERNEL && INVARIANTS) */
+
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+
+#define LIST_FIRST(head) ((head)->lh_first)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = LIST_FIRST((head)); \
+ (var); \
+ (var) = LIST_NEXT((var), field))
+
+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = LIST_FIRST((head)); \
+ (var) && ((tvar) = LIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define LIST_INIT(head) do { \
+ LIST_FIRST((head)) = NULL; \
+} while (*"\0")
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ QMD_LIST_CHECK_NEXT(listelm, field); \
+ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+ LIST_NEXT((listelm), field)->field.le_prev = \
+ &LIST_NEXT((elm), field); \
+ LIST_NEXT((listelm), field) = (elm); \
+ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
+} while (*"\0")
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ QMD_LIST_CHECK_PREV(listelm, field); \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ LIST_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
+} while (*"\0")
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ QMD_LIST_CHECK_HEAD((head), field); \
+ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
+ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+ LIST_FIRST((head)) = (elm); \
+ (elm)->field.le_prev = &LIST_FIRST((head)); \
+} while (*"\0")
+
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_REMOVE(elm, field) do { \
+ QMD_LIST_CHECK_NEXT(elm, field); \
+ QMD_LIST_CHECK_PREV(elm, field); \
+ if (LIST_NEXT((elm), field) != NULL) \
+ LIST_NEXT((elm), field)->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = LIST_NEXT((elm), field); \
+ TRASHIT((elm)->field.le_next); \
+ TRASHIT((elm)->field.le_prev); \
+} while (*"\0")
+
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+ TRACEBUF \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+ TRACEBUF \
+}
+
+/*
+ * Tail queue functions.
+ */
+#if (defined(_KERNEL) && defined(INVARIANTS))
+#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
+ if (!TAILQ_EMPTY(head) && \
+ TAILQ_FIRST((head))->field.tqe_prev != \
+ &TAILQ_FIRST((head))) \
+ panic("Bad tailq head %p first->prev != head", (head)); \
+} while (*"\0")
+
+#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
+ if (*(head)->tqh_last != NULL) \
+ panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \
+} while (*"\0")
+
+#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
+ if (TAILQ_NEXT((elm), field) != NULL && \
+ TAILQ_NEXT((elm), field)->field.tqe_prev != \
+ &((elm)->field.tqe_next)) \
+ panic("Bad link elm %p next->prev != elm", (elm)); \
+} while (*"\0")
+
+#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
+ if (*(elm)->field.tqe_prev != (elm)) \
+ panic("Bad link elm %p prev->next != elm", (elm)); \
+} while (*"\0")
+#else
+#define QMD_TAILQ_CHECK_HEAD(head, field)
+#define QMD_TAILQ_CHECK_TAIL(head, headname)
+#define QMD_TAILQ_CHECK_NEXT(elm, field)
+#define QMD_TAILQ_CHECK_PREV(elm, field)
+#endif /* (_KERNEL && INVARIANTS) */
+
+#define TAILQ_CONCAT(head1, head2, field) do { \
+ if (!TAILQ_EMPTY(head2)) { \
+ *(head1)->tqh_last = (head2)->tqh_first; \
+ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ TAILQ_INIT((head2)); \
+ QMD_TRACE_HEAD(head1); \
+ QMD_TRACE_HEAD(head2); \
+ } \
+} while (*"\0")
+
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var); \
+ (var) = TAILQ_NEXT((var), field))
+
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var); \
+ (var) = TAILQ_PREV((var), headname, field))
+
+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_INIT(head) do { \
+ TAILQ_FIRST((head)) = NULL; \
+ (head)->tqh_last = &TAILQ_FIRST((head)); \
+ QMD_TRACE_HEAD(head); \
+} while (*"\0")
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ QMD_TAILQ_CHECK_NEXT(listelm, field); \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else { \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_HEAD(head); \
+ } \
+ TAILQ_NEXT((listelm), field) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+ QMD_TRACE_ELEM(&listelm->field); \
+} while (*"\0")
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ QMD_TAILQ_CHECK_PREV(listelm, field); \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ TAILQ_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+ QMD_TRACE_ELEM(&listelm->field); \
+} while (*"\0")
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ QMD_TAILQ_CHECK_HEAD(head, field); \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
+ TAILQ_FIRST((head))->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ TAILQ_FIRST((head)) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
+ QMD_TRACE_HEAD(head); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (*"\0")
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ QMD_TAILQ_CHECK_TAIL(head, field); \
+ TAILQ_NEXT((elm), field) = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_HEAD(head); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (*"\0")
+
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ QMD_TAILQ_CHECK_NEXT(elm, field); \
+ QMD_TAILQ_CHECK_PREV(elm, field); \
+ if ((TAILQ_NEXT((elm), field)) != NULL) \
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else { \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ QMD_TRACE_HEAD(head); \
+ } \
+ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
+ TRASHIT((elm)->field.tqe_next); \
+ TRASHIT((elm)->field.tqe_prev); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (*"\0")
+
+
+#ifdef _KERNEL
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_QUEUE_H_ */
+
+/* END CSTYLED */
diff --git a/usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart b/usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart
new file mode 100644
index 0000000..3bcf0d3
--- /dev/null
+++ b/usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart
@@ -0,0 +1,23 @@
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
diff --git a/usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart.descrip b/usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart.descrip
new file mode 100644
index 0000000..d22052c
--- /dev/null
+++ b/usr/src/uts/common/sys/THIRDPARTYLICENSE.agpgart.descrip
@@ -0,0 +1 @@
+AGPGART DEVICE DRIVER
diff --git a/usr/src/uts/common/sys/agp/agpamd64gart_io.h b/usr/src/uts/common/sys/agp/agpamd64gart_io.h
new file mode 100644
index 0000000..d40b9fc
--- /dev/null
+++ b/usr/src/uts/common/sys/agp/agpamd64gart_io.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_AGPAMD64GART_IO_H
+#define _SYS_AGPAMD64GART_IO_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _KERNEL
+
+#define AMD64GART_NAME "amd64_gart"
+#define CPUGART_DEVLINK "/dev/agp/cpugart"
+
+#define AGPAMD64GARTIOC_BASE 'M'
+
+#define AMD64_GET_INFO _IOR(AGPAMD64GARTIOC_BASE, 50, amdgart_info_t)
+#define AMD64_SET_GART_ADDR _IOW(AGPAMD64GARTIOC_BASE, 51, uint32_t)
+#define AMD64_FLUSH_GTLB _IO(AGPAMD64GARTIOC_BASE, 52)
+#define AMD64_CONFIGURE _IO(AGPAMD64GARTIOC_BASE, 53)
+#define AMD64_UNCONFIG _IO(AGPAMD64GARTIOC_BASE, 54)
+
+/* Used to retrieve attributes of the amd64 gart device */
+typedef struct amdgart_info {
+ uint64_t cgart_aperbase;
+ size_t cgart_apersize;
+} amdgart_info_t;
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AGPAMD64GART_IO_H */
diff --git a/usr/src/uts/common/sys/agp/agpdefs.h b/usr/src/uts/common/sys/agp/agpdefs.h
new file mode 100644
index 0000000..adda870
--- /dev/null
+++ b/usr/src/uts/common/sys/agp/agpdefs.h
@@ -0,0 +1,354 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_AGPDEFS_H
+#define _SYS_AGPDEFS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This AGP memory type is required by some hardware like the i810 video
+ * card, which needs physically contiguous pages to set up the hardware
+ * cursor. Usually, several tens of kilobytes are needed in this case.
+ * We use DDI DMA interfaces to allocate such memory in the agpgart driver,
+ * and it cannot be exported to user applications directly by calling mmap
+ * on the agpgart driver. The typical usage scenario is as follows:
+ * first, the Xserver gets the memory physical address by calling
+ * AGPIOC_ALLOCATE on the agpgart driver; second, the Xserver uses the
+ * physical address to mmap the memory into the Xserver address space via
+ * the xsvc driver.
+ */
+#define	AGP_PHYSICAL	2	/* Only used for i810, HW cursor */
+
+#ifdef _KERNEL
+
+/* AGP space units */
+#define AGP_PAGE_SHIFT 12
+#define AGP_PAGE_SIZE (1 << AGP_PAGE_SHIFT)
+#define AGP_PAGE_OFFSET (AGP_PAGE_SIZE - 1)
+#define AGP_MB2PAGES(x) ((x) << 8)
+#define AGP_PAGES2BYTES(x) ((x) << AGP_PAGE_SHIFT)
+#define AGP_BYTES2PAGES(x) ((x) >> AGP_PAGE_SHIFT)
+#define AGP_PAGES2KB(x) ((x) << 2)
+#define AGP_ALIGNED(offset) (((offset) & AGP_PAGE_OFFSET) == 0)
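+
+/*
+ * Example (hypothetical sizes): AGP_MB2PAGES(16) == 4096 pages of 4K
+ * each, AGP_PAGES2BYTES(4096) == 16MB, and AGP_PAGES2KB(4096) == 16384KB.
+ */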
+
+/* standard PCI register offsets */
+#define PCI_CONF_CAP_MASK 0x10
+#define PCI_CONF_CAPID_MASK 0xff
+#define PCI_CONF_NCAPID_MASK 0xff00
+
+#define INTEL_VENDOR_ID 0x8086
+#define AMD_VENDOR_ID 0x1022
+#define VENDOR_ID_MASK 0xffff
+
+/* macros for device types */
+#define DEVICE_IS_I810 11 /* intel i810 series video card */
+#define DEVICE_IS_I830 12 /* intel i830, i845, i855 series */
+#define DEVICE_IS_AGP 21 /* external AGP video card */
+#define CHIP_IS_INTEL 10 /* intel agp bridge */
+#define CHIP_IS_AMD 20 /* amd agp bridge */
+
+/* AGP bridge device id */
+#define AMD_BR_8151 0x74541022
+#define INTEL_BR_810 0x71208086
+#define INTEL_BR_810DC 0x71228086
+#define INTEL_BR_810E 0x71248086
+#define INTEL_BR_815 0x11308086 /* include 815G/EG/P/EP */
+#define INTEL_BR_830M 0x35758086
+#define INTEL_BR_845 0x25608086 /* include 845G/P */
+#define INTEL_BR_855GM 0x35808086 /* include 852GM/PM */
+#define INTEL_BR_855PM 0x33408086
+#define INTEL_BR_865 0x25708086
+#define INTEL_BR_915 0x25808086
+#define INTEL_BR_915GM 0x25908086
+#define INTEL_BR_945 0x27708086
+#define INTEL_BR_945GM 0x27a08086
+#define INTEL_BR_945GME 0x27ac8086
+#define INTEL_BR_946GZ 0x29708086
+#define INTEL_BR_965G1 0x29808086
+#define INTEL_BR_965Q 0x29908086
+#define INTEL_BR_965G2 0x29a08086
+#define INTEL_BR_965GM 0x2a008086
+#define INTEL_BR_965GME 0x2a108086
+#define INTEL_BR_Q35 0x29b08086
+#define INTEL_BR_G33 0x29c08086
+#define INTEL_BR_Q33 0x29d08086
+#define INTEL_BR_GM45 0x2a408086
+#define INTEL_BR_EL 0x2e008086
+#define INTEL_BR_Q45 0x2e108086
+#define INTEL_BR_G45 0x2e208086
+#define INTEL_BR_G41 0x2e308086
+#define INTEL_BR_IGDNG_D 0x00408086
+#define INTEL_BR_IGDNG_M 0x00448086
+#define INTEL_BR_IGDNG_MA 0x00628086
+#define INTEL_BR_IGDNG_MC2 0x006a8086
+#define INTEL_BR_B43 0x2e408086
+
+/* AGP common register offset in pci configuration space */
+#define AGP_CONF_MISC 0x51 /* one byte */
+#define AGP_CONF_CAPPTR 0x34
+#define AGP_CONF_APERBASE 0x10
+#define AGP_CONF_STATUS 0x04 /* CAP + 0x4 */
+#define AGP_CONF_COMMAND 0x08 /* CAP + 0x8 */
+
+/* AGP target register and mask defines */
+#define AGP_CONF_CONTROL 0x10 /* CAP + 0x10 */
+#define AGP_TARGET_BAR1 1
+#define AGP_32_APERBASE_MASK 0xffc00000 /* 4M aligned */
+#define AGP_64_APERBASE_MASK 0xffffc00000LL /* 4M aligned */
+#define AGP_CONF_APERSIZE 0x14 /* CAP + 0x14 */
+#define AGP_CONF_ATTBASE 0x18 /* CAP + 0x18 */
+#define AGP_ATTBASE_MASK 0xfffff000
+#define AGPCTRL_GTLBEN (0x1 << 7)
+#define AGP_APER_TYPE_MASK 0x4
+#define AGP_APER_SIZE_MASK 0xf00
+#define AGP_APER_128M_MASK 0x3f
+#define AGP_APER_4G_MASK 0xf00
+#define AGP_APER_4M 0x3f
+#define AGP_APER_8M 0x3e
+#define AGP_APER_16M 0x3c
+#define AGP_APER_32M 0x38
+#define AGP_APER_64M 0x30
+#define AGP_APER_128M 0x20
+#define AGP_APER_256M 0xf00
+#define AGP_APER_512M 0xe00
+#define AGP_APER_1024M 0xc00
+#define AGP_APER_2048M 0x800
+#define AGP_APER_4G 0x000
+#define AGP_MISC_APEN 0x2
+
+/* AGP gart table definition */
+#define AGP_ENTRY_VALID 0x1
+
+/* AGP term definitions */
+#define AGP_CAP_ID 0x2
+#define AGP_CAP_OFF_DEF 0xa0
+
+/* Intel integrated video card, chipset id */
+#define INTEL_IGD_810 0x71218086
+#define INTEL_IGD_810DC 0x71238086
+#define INTEL_IGD_810E 0x71258086
+#define INTEL_IGD_815 0x11328086
+#define INTEL_IGD_830M 0x35778086
+#define INTEL_IGD_845G 0x25628086
+#define INTEL_IGD_855GM 0x35828086
+#define INTEL_IGD_865G 0x25728086
+#define INTEL_IGD_915 0x25828086
+#define INTEL_IGD_915GM 0x25928086
+#define INTEL_IGD_945 0x27728086
+#define INTEL_IGD_945GM 0x27a28086
+#define INTEL_IGD_945GME 0x27ae8086
+#define INTEL_IGD_946GZ 0x29728086
+#define INTEL_IGD_965G1 0x29828086
+#define INTEL_IGD_965Q 0x29928086
+#define INTEL_IGD_965G2 0x29a28086
+#define INTEL_IGD_965GM 0x2a028086
+#define INTEL_IGD_965GME 0x2a128086
+#define INTEL_IGD_Q35 0x29b28086
+#define INTEL_IGD_G33 0x29c28086
+#define INTEL_IGD_Q33 0x29d28086
+#define INTEL_IGD_GM45 0x2a428086
+#define INTEL_IGD_EL 0x2e028086
+#define INTEL_IGD_Q45 0x2e128086
+#define INTEL_IGD_G45 0x2e228086
+#define INTEL_IGD_G41 0x2e328086
+#define INTEL_IGD_IGDNG_D 0x00428086
+#define INTEL_IGD_IGDNG_M 0x00468086
+#define INTEL_IGD_B43 0x2e428086
+
+/* Intel 915 and 945 series */
+#define IS_INTEL_915(device) ((device == INTEL_IGD_915) || \
+ (device == INTEL_IGD_915GM) || \
+ (device == INTEL_IGD_945) || \
+ (device == INTEL_IGD_945GM) || \
+ (device == INTEL_IGD_945GME))
+
+/* Intel 965 series */
+#define IS_INTEL_965(device) ((device == INTEL_IGD_946GZ) || \
+ (device == INTEL_IGD_965G1) || \
+ (device == INTEL_IGD_965Q) || \
+ (device == INTEL_IGD_965G2) || \
+ (device == INTEL_IGD_965GM) || \
+ (device == INTEL_IGD_965GME) || \
+ (device == INTEL_IGD_GM45) || \
+ IS_INTEL_G4X(device))
+
+/* Intel G33 series */
+#define IS_INTEL_X33(device) ((device == INTEL_IGD_Q35) || \
+ (device == INTEL_IGD_G33) || \
+ (device == INTEL_IGD_Q33))
+
+/* IGDNG */
+#define IS_IGDNG(device) ((device == INTEL_IGD_IGDNG_D) || \
+ (device == INTEL_IGD_IGDNG_M))
+
+/* Intel G4X series */
+#define IS_INTEL_G4X(device) ((device == INTEL_IGD_EL) || \
+ (device == INTEL_IGD_Q45) || \
+ (device == INTEL_IGD_G45) || \
+ (device == INTEL_IGD_G41) || \
+ IS_IGDNG(device) || \
+ (device == INTEL_IGD_B43))
+
+/* register offsets in PCI config space */
+#define I8XX_CONF_GMADR 0x10 /* GMADR of i8xx series */
+#define I915_CONF_GMADR 0x18 /* GMADR of i915 series */
+/* (Mirror) GMCH Graphics Control Register (GGC, MGGC) */
+#define I8XX_CONF_GC 0x52
+
+/* Intel integrated video card graphics mode mask */
+#define I8XX_GC_MODE_MASK 0x70
+#define IX33_GC_MODE_MASK 0xf0
+/* GTT Graphics Memory Size (9:8) in GMCH Graphics Control Register */
+#define IX33_GGMS_MASK 0x300
+/* No VT mode, 1MB allocated for GTT */
+#define IX33_GGMS_1M 0x100
+/* VT mode, 2MB allocated for GTT */
+#define IX33_GGMS_2M 0x200
+
+/* Intel integrated video card GTT definition */
+#define GTT_PAGE_SHIFT 12
+#define GTT_PAGE_SIZE (1 << GTT_PAGE_SHIFT)
+#define GTT_PAGE_OFFSET (GTT_PAGE_SIZE - 1)
+#define GTT_PTE_MASK (~GTT_PAGE_OFFSET)
+#define GTT_PTE_VALID 0x1
+#define GTT_TABLE_VALID 0x1
+#define GTT_BASE_MASK 0xfffff000
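+/* one megabyte is 256 4KB pages, hence the shift by 8 below */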
+#define GTT_MB_TO_PAGES(m) ((m) << 8)
+#define GTT_POINTER_MASK 0xffffffff00000000
+
+/* Intel i810 register offset */
+#define I810_POINTER_MASK 0xffffffffc0000000
+#define I810_CONF_SMRAM 0x70 /* offset in PCI config space */
+#define I810_GMS_MASK 0xc0 /* smram register mask */
+/*
+ * GART and GTT entry format table
+ *
+ * AMD64 GART entry
+ * from the BIOS and Kernel Developer's Guide for AMD64
+ * -----------------------------
+ * Bits Description |
+ * 0 valid |
+ * 1 coherent |
+ * 3:2 reserved |
+ * 11:4 physaddr[39:32] |
+ * 31:12 physaddr[31:12] |
+ * -----------------------------
+ * Intel GTT entry
+ * Intel video programming manual
+ * -----------------------------
+ * Bits Description |
+ * 0 valid |
+ * 2:1 memory type |
+ * 29:12 PhysAddr[29:12] |
+ * 31:30 reserved |
+ * -----------------------------
+ * AGP entry
+ * from AGP protocol 3.0
+ * -----------------------------
+ * Bits Description |
+ * 0 valid |
+ * 1 coherent |
+ * 3:2 reserved |
+ * 11:4 PhysAddr[39:32] |
+ * 31:12 PhysAddr[31:12] |
+ * 63:32 PhysAddr[71:40] |
+ * -----------------------------
+ */
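+
+/*
+ * Illustrative sketch (an editor's addition, not part of the original
+ * header): encoding a page-aligned physical address into the entry
+ * formats described above. The helper names are hypothetical, and the
+ * coherent bit and memory-type field are left clear.
+ */
+static inline uint32_t
+amd64_gart_mk_entry(uint64_t paddr)
+{
+	return ((uint32_t)(paddr & 0xfffff000ULL) |	/* physaddr[31:12] */
+	    ((uint32_t)((paddr >> 32) & 0xff) << 4) |	/* physaddr[39:32] */
+	    AGP_ENTRY_VALID);				/* bit 0: valid */
+}
+
+static inline uint32_t
+intel_gtt_mk_entry(uint64_t paddr)
+{
+	/* PhysAddr[29:12] plus the valid bit */
+	return ((uint32_t)(paddr & GTT_PTE_MASK) | GTT_PTE_VALID);
+}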
+
+/*
+ * gart and gtt table base register format
+ *
+ * AMD64 register format
+ * from the BIOS and Kernel Developer's Guide for AMD64
+ * ---------------------------------------------
+ * Bits Description |
+ * 3:0 reserved |
+ * 31:4 physical addr 39:12 |
+ * ----------------------------------------------
+ * INTEL AGPGART table base register format
+ * from AGP protocol 3.0 p142; only 32 bits are supported
+ * ---------------------------------------------
+ * Bits Description |
+ * 11:0 reserved |
+ * 31:12 physical addr 31:12 |
+ * 63:32 physical addr 63:32 |
+ * ---------------------------------------------
+ * INTEL i810 GTT table base register format
+ * _____________________________________________
+ * Bits Description |
+ * 0 GTT table enable bit |
+ * 11:1 reserved |
+ * 31:12 physical addr 31:12 |
+ * ---------------------------------------------
+ */
+
+/* Intel agp bridge specific */
+#define AGP_INTEL_POINTER_MASK 0xffffffff00000000
+
+/* AMD64 cpu gart device register offsets */
+#define AMD64_APERTURE_CONTROL 0x90
+#define AMD64_APERTURE_BASE 0x94
+#define AMD64_GART_CACHE_CTL 0x9c
+#define AMD64_GART_BASE 0x98
+
+/* AMD64 cpu gart bits */
+#define AMD64_APERBASE_SHIFT 25
+#define AMD64_APERBASE_MASK 0x00007fff
+#define AMD64_GARTBASE_SHIFT 8
+#define AMD64_GARTBASE_MASK 0xfffffff0
+#define AMD64_POINTER_MASK 0xffffff0000000000
+#define AMD64_INVALID_CACHE 0x1
+#define AMD64_GART_SHIFT 12
+#define AMD64_RESERVE_SHIFT 4
+#define AMD64_APERSIZE_MASK 0xe
+#define AMD64_GARTEN 0x1
+#define AMD64_DISGARTCPU 0x10
+#define AMD64_DISGARTIO 0x20
+#define AMD64_ENTRY_VALID 0x1
+
+/* Other common routines */
+#define MB2BYTES(m) ((m) << 20)
+#define BYTES2MB(m) ((m) >> 20)
+#define GIGA_MASK 0xC0000000
+#define UI32_MASK 0xffffffffU
+#define MAXAPERMEGAS 0x1000 /* Aper size no more than 4G */
+#define MINAPERMEGAS 192
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AGPDEFS_H */
diff --git a/usr/src/uts/common/sys/agp/agpgart_impl.h b/usr/src/uts/common/sys/agp/agpgart_impl.h
new file mode 100644
index 0000000..a8f5b6b
--- /dev/null
+++ b/usr/src/uts/common/sys/agp/agpgart_impl.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_AGPGART_IMPL_H
+#define _SYS_AGPGART_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifdef _KERNEL
+
+#define AGPGART_MAX_INSTANCES 1
+#define AGP_MAXKEYS 256
+#define AGPGART_DEVNODE "agpgart"
+
+/*
+ * The values of type agp_arc_type_t are used as indexes into arc_name
+ * in agp_kstat.c.
+ * So if agp_arc_type_t's values are changed in the future, the content
+ * of arc_name must be changed accordingly.
+ */
+enum agp_arc_type {
+ ARC_IGD810 = 0,
+ ARC_IGD830 = 1,
+ ARC_INTELAGP = 2,
+ ARC_AMD64AGP = 3,
+ ARC_UNKNOWN = 5
+};
+typedef enum agp_arc_type agp_arc_type_t;
+
+/* linked list structure of multiple agp gart devices access handles */
+typedef struct amd64_gart_dev_list {
+ ldi_handle_t gart_devhdl;
+ struct amd64_gart_dev_list *next;
+} amd64_gart_dev_list_t;
+
+typedef struct amd64_garts_dev {
+ int gart_device_num;
+ amd64_gart_dev_list_t *gart_dev_list_head;
+} amd64_garts_dev_t;
+
+/*
+ * AGP target and master devices register their config space access
+ * interfaces here.
+ * On AMD64, gart_device_num is the number of host bridges, i.e.
+ * device (1100, 1022); refer to the BIOS and Kernel Developer's Guide
+ * for AMD Athlon 64 and Opteron processors.
+ */
+typedef struct agp_registered_dev {
+ amd64_garts_dev_t agprd_cpugarts;
+ ldi_handle_t agprd_targethdl;
+ ldi_handle_t agprd_masterhdl;
+ agp_arc_type_t agprd_arctype; /* system types */
+} agp_registered_dev_t;
+
+/*
+ * If the OS has direct mapping support for mapping physical page frames
+ * directly to user addresses, we use this struct for memory
+ * allocation.
+ */
+typedef struct agp_pmem_handle {
+ devmap_pmem_cookie_t pmem_cookie;
+} agp_pmem_handle_t;
+
+/*
+ * This struct is used for DDI-compliant memory allocations.
+ */
+typedef struct agp_kmem_handle {
+ ddi_dma_handle_t kmem_handle;
+ ddi_dma_cookie_t kmem_dcookie;
+ uint32_t kmem_cookies_num;
+ caddr_t kmem_kvaddr;
+ size_t kmem_reallen;
+ ddi_acc_handle_t kmem_acchdl;
+} agp_kmem_handle_t;
+
+typedef struct keytable_ent {
+ int kte_type; /* agp memory type */
+ int kte_key; /* memory key */
+ uint32_t kte_pgoff; /* aperture offset bound in pages */
+ pgcnt_t kte_pages; /* user-requested size in pages */
+ int kte_bound; /* bound to gart table */
+ void *kte_memhdl; /* agp_kmem or agp_pmem handle */
+ pfn_t *kte_pfnarray; /* page frame numbers allocated */
+ int kte_refcnt; /* reference count */
+} keytable_ent_t;
+
+typedef struct key_list {
+ int key_idx;
+ struct key_list *next;
+} key_list_t;
+
+/*
+ * for kstat
+ */
+typedef struct agp_kern_info {
+ uint32_t agpki_mdevid;
+ agp_version_t agpki_mver;
+ uint32_t agpki_mstatus;
+ size_t agpki_presize; /* valid only for IGD, in KB */
+ uint32_t agpki_tdevid;
+ agp_version_t agpki_tver;
+ uint32_t agpki_tstatus;
+ uint64_t agpki_aperbase;
+ uint32_t agpki_apersize; /* in MB */
+} agp_kern_info_t;
+
+#ifdef _MULTI_DATAMODEL
+typedef struct _agp_info32 {
+ agp_version_t agpi32_version;
+ uint32_t agpi32_devid; /* device VID + DID */
+ uint32_t agpi32_mode; /* mode of bridge */
+ uint32_t agpi32_aperbase; /* base of aperture */
+ uint32_t agpi32_apersize; /* in MB */
+ uint32_t agpi32_pgtotal; /* max number of pages */
+ uint32_t agpi32_pgsystem; /* same as pg_total */
+ uint32_t agpi32_pgused; /* pages consumed */
+} agp_info32_t;
+#endif /* _MULTI_DATAMODEL */
+
+struct list_head {
+ struct list_head *next, *prev;
+ struct igd_gtt_seg *gttseg;
+};
+
+
+typedef struct agpgart_softstate {
+ dev_info_t *asoft_dip;
+ kmutex_t asoft_instmutex;
+ agp_kern_info_t asoft_info;
+ int asoft_opened; /* 0 not opened, non-0 opened */
+ int asoft_acquired; /* 0 released, 1 acquired */
+	int asoft_agpen; /* 0 disabled, 1 enabled */
+	pid_t asoft_curpid; /* the process acquiring the gart */
+	uint32_t asoft_mode; /* the agp mode that was set */
+ uint32_t asoft_pgtotal; /* total available pages */
+ uint32_t asoft_pgused; /* pages already used */
+ /* resource handles */
+ ldi_ident_t asoft_li; /* for ldi ops */
+	keytable_ent_t *asoft_table; /* key table for all allocated memory */
+ ddi_dma_handle_t gart_dma_handle; /* for GATT table */
+ ddi_acc_handle_t gart_dma_acc_handle; /* for GATT table */
+
+ /* gart table info */
+ uint64_t gart_pbase; /* gart table physical address */
+ caddr_t gart_vbase; /* kernel-vir addr for GATT table */
+ size_t gart_size; /* the size of aperture in megabytes */
+ /* all registered agp device in here */
+ agp_registered_dev_t asoft_devreg;
+ kstat_t *asoft_ksp;
+ struct list_head mapped_list;
+} agpgart_softstate_t;
+
+typedef struct agpgart_ctx {
+ offset_t actx_off;
+ agpgart_softstate_t *actx_sc;
+} agpgart_ctx_t;
+
+#define KMEMP(p) ((agp_kmem_handle_t *)p)
+#define PMEMP(p) ((agp_pmem_handle_t *)p)
+
+int agp_init_kstats(agpgart_softstate_t *);
+void agp_fini_kstats(agpgart_softstate_t *);
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AGPGART_IMPL_H */
diff --git a/usr/src/uts/common/sys/agp/agpmaster_io.h b/usr/src/uts/common/sys/agp/agpmaster_io.h
new file mode 100644
index 0000000..1202e76
--- /dev/null
+++ b/usr/src/uts/common/sys/agp/agpmaster_io.h
@@ -0,0 +1,100 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_AGPMASTER_IO_H
+#define _SYS_AGPMASTER_IO_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _KERNEL
+
+#define AGPMASTER_NAME "agpmaster"
+#define AGPMASTER_DEVLINK "/dev/agp/agpmaster"
+
+/* macros for layered ioctls */
+#define AGPMASTERIOC_BASE 'M'
+#define DEVICE_DETECT _IOR(AGPMASTERIOC_BASE, 10, int)
+#define I8XX_GET_INFO _IOR(AGPMASTERIOC_BASE, 11, igd_info_t)
+#define I810_SET_GTT_BASE _IOW(AGPMASTERIOC_BASE, 12, uint32_t)
+#define I8XX_ADD2GTT _IOW(AGPMASTERIOC_BASE, 13, igd_gtt_seg_t)
+#define I8XX_REM_GTT _IOW(AGPMASTERIOC_BASE, 14, igd_gtt_seg_t)
+#define I8XX_UNCONFIG _IO(AGPMASTERIOC_BASE, 16)
+#define AGP_MASTER_GETINFO _IOR(AGPMASTERIOC_BASE, 20, agp_info_t)
+#define AGP_MASTER_SETCMD _IOW(AGPMASTERIOC_BASE, 21, uint32_t)
+
+/* used for IGD to bind/unbind gtt entries */
+typedef struct igd_gtt_seg {
+ uint32_t igs_pgstart;
+ uint32_t igs_npage;
+ uint32_t *igs_phyaddr; /* pointer to address array */
+ uint32_t igs_type; /* reserved for other memory type */
+} igd_gtt_seg_t;
+
+/* used for IGD to get info */
+typedef struct igd_info {
+ uint32_t igd_devid;
+ uint32_t igd_aperbase;
+ size_t igd_apersize; /* in MB */
+} igd_info_t;
+
+typedef struct gtt_impl {
+	ddi_acc_handle_t gtt_mmio_handle; /* mmapped graphics registers */
+ caddr_t gtt_mmio_base; /* pointer to register base */
+ ddi_acc_handle_t gtt_handle; /* GTT table */
+ caddr_t gtt_addr; /* pointer to gtt */
+ igd_info_t gtt_info; /* for I8XX_GET_INFO ioctl */
+} gtt_impl_t;
+
+typedef struct agp_master_softc {
+ uint32_t agpm_id; /* agp master device id */
+ ddi_acc_handle_t agpm_acc_hdl; /* agp master pci conf handle */
+ int agpm_dev_type; /* which agp device type */
+ union {
+ off_t agpm_acaptr; /* AGP capability reg pointer */
+ gtt_impl_t agpm_gtt; /* for gtt table */
+ } agpm_data;
+} agp_master_softc_t;
+
+extern int agpmaster_attach(dev_info_t *, agp_master_softc_t **,
+ ddi_acc_handle_t, minor_t);
+extern void agpmaster_detach(agp_master_softc_t **);
+extern int agpmaster_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
+ cred_t *cred, int *rval, agp_master_softc_t *softc);
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AGPMASTER_IO_H */
diff --git a/usr/src/uts/common/sys/agp/agptarget_io.h b/usr/src/uts/common/sys/agp/agptarget_io.h
new file mode 100644
index 0000000..316e2ab
--- /dev/null
+++ b/usr/src/uts/common/sys/agp/agptarget_io.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_AGPTARGET_IO_H
+#define _SYS_AGPTARGET_IO_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _KERNEL
+
+#define AGPTARGET_NAME "agptarget"
+#define AGPTARGET_DEVLINK "/dev/agp/agptarget"
+
+/* macros for layered ioctls */
+#define AGPTARGETIOC_BASE 'M'
+#define CHIP_DETECT _IOR(AGPTARGETIOC_BASE, 30, int)
+#define I8XX_GET_PREALLOC_SIZE _IOR(AGPTARGETIOC_BASE, 31, size_t)
+#define AGP_TARGET_GETINFO _IOR(AGPTARGETIOC_BASE, 32, i_agp_info_t)
+#define AGP_TARGET_SET_GATTADDR _IOW(AGPTARGETIOC_BASE, 33, uint32_t)
+#define AGP_TARGET_SETCMD _IOW(AGPTARGETIOC_BASE, 34, uint32_t)
+#define AGP_TARGET_FLUSH_GTLB _IO(AGPTARGETIOC_BASE, 35)
+#define AGP_TARGET_CONFIGURE _IO(AGPTARGETIOC_BASE, 36)
+#define AGP_TARGET_UNCONFIG _IO(AGPTARGETIOC_BASE, 37)
+#define INTEL_CHIPSET_FLUSH_SETUP _IO(AGPTARGETIOC_BASE, 38)
+#define INTEL_CHIPSET_FLUSH _IO(AGPTARGETIOC_BASE, 39)
+#define INTEL_CHIPSET_FLUSH_FREE _IO(AGPTARGETIOC_BASE, 40)
+
+/* Internal agp info struct */
+typedef struct _i_agp_info {
+ agp_version_t iagp_ver;
+ uint32_t iagp_devid; /* bridge vendor + device */
+	uint32_t iagp_mode; /* mode of bridge */
+ uint64_t iagp_aperbase; /* base of aperture */
+ size_t iagp_apersize; /* aperture range size in bytes */
+} i_agp_info_t;
+
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AGPTARGET_IO_H */
diff --git a/usr/src/uts/common/sys/agpgart.h b/usr/src/uts/common/sys/agpgart.h
new file mode 100644
index 0000000..c8770ae
--- /dev/null
+++ b/usr/src/uts/common/sys/agpgart.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Copyright (c) 2000 Doug Rabson
+ * Copyright (c) 2009, Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _SYS_AGPGART_H
+#define _SYS_AGPGART_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define AGP_NORMAL 0 /* mapped to user land, no cache */
+
+typedef struct _agp_version {
+ uint16_t agpv_major;
+ uint16_t agpv_minor;
+} agp_version_t;
+
+
+typedef struct _agp_info {
+ agp_version_t agpi_version;
+ uint32_t agpi_devid; /* bridge vendor + device */
+	uint32_t agpi_mode; /* mode of bridge */
+ ulong_t agpi_aperbase; /* base of aperture */
+ size_t agpi_apersize; /* aperture range size */
+ uint32_t agpi_pgtotal; /* max number of pages in aperture */
+ uint32_t agpi_pgsystem; /* same as pg_total */
+	uint32_t agpi_pgused; /* number of currently used pages */
+} agp_info_t;
+
+typedef struct _agp_setup {
+ uint32_t agps_mode;
+} agp_setup_t;
+
+typedef struct _agp_allocate {
+ int32_t agpa_key;
+ uint32_t agpa_pgcount;
+ uint32_t agpa_type;
+ uint32_t agpa_physical; /* for i810 only, private */
+} agp_allocate_t;
+
+typedef struct _agp_bind_pages {
+ uint32_t agpb_pgstart;
+ pfn_t *agpb_pages;
+ unsigned long agpb_pgcount;
+} agp_bind_pages_t;
+
+typedef struct _agp_unbind_pages {
+ uint32_t agpb_pgstart;
+ unsigned long agpb_pgcount;
+ uint32_t agpb_type;
+} agp_unbind_pages_t;
+
+typedef struct _agp_bind {
+ int32_t agpb_key;
+ uint32_t agpb_pgstart;
+} agp_bind_t;
+
+typedef struct _agp_unbind {
+ int32_t agpu_key;
+	uint32_t agpu_pri; /* not used in Solaris */
+} agp_unbind_t;
+
+#define AGPIOC_BASE 'G'
+#define AGPIOC_INFO _IOR(AGPIOC_BASE, 0, 100)
+#define AGPIOC_ACQUIRE _IO(AGPIOC_BASE, 1)
+#define AGPIOC_RELEASE _IO(AGPIOC_BASE, 2)
+#define AGPIOC_SETUP _IOW(AGPIOC_BASE, 3, agp_setup_t)
+#define AGPIOC_ALLOCATE _IOWR(AGPIOC_BASE, 4, agp_allocate_t)
+#define AGPIOC_DEALLOCATE _IOW(AGPIOC_BASE, 5, int)
+#define AGPIOC_BIND _IOW(AGPIOC_BASE, 6, agp_bind_t)
+#define AGPIOC_UNBIND _IOW(AGPIOC_BASE, 7, agp_unbind_t)
+#define AGPIOC_IOREMAP _IO(AGPIOC_BASE, 8)
+#define AGPIOC_IOREMAP_FREE _IO(AGPIOC_BASE, 9)
+#define AGPIOC_READ _IO(AGPIOC_BASE, 10)
+#define AGPIOC_WRITE _IO(AGPIOC_BASE, 11)
+#define AGPIOC_FLUSHCHIPSET _IO(AGPIOC_BASE, 12)
+#define AGPIOC_PAGES_BIND _IOW(AGPIOC_BASE, 13, agp_bind_pages_t)
+#define AGPIOC_PAGES_UNBIND _IOW(AGPIOC_BASE, 14, agp_unbind_pages_t)
+#define AGPIOC_PAGES_REBIND _IO(AGPIOC_BASE, 15)
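+
+/*
+ * Illustrative user-space sketch (an editor's addition, not part of the
+ * original header); error handling is elided, and the allocate/bind
+ * details depend on the driver's state machine:
+ *
+ *	int fd = open(AGP_DEVICE, O_RDWR);
+ *	agp_info_t info;
+ *	agp_setup_t setup;
+ *
+ *	(void) ioctl(fd, AGPIOC_ACQUIRE);
+ *	(void) ioctl(fd, AGPIOC_INFO, &info);
+ *	setup.agps_mode = info.agpi_mode;
+ *	(void) ioctl(fd, AGPIOC_SETUP, &setup);
+ *	...
+ *	(void) ioctl(fd, AGPIOC_RELEASE);
+ *	(void) close(fd);
+ */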
+
+/* AGP status register bits definition */
+#define AGPSTAT_RQ_MASK 0xff000000 /* target only */
+#define AGPSTAT_SBA (0x1 << 9) /* always 1 for 3.0 */
+#define AGPSTAT_OVER4G (0x1 << 5)
+#define AGPSTAT_FW (0x1 << 4)
+#define AGPSTAT_RATE_MASK 0x7
+/* rate for 2.0 mode */
+#define AGP2_RATE_1X 0x1
+#define AGP2_RATE_2X 0x2
+#define AGP2_RATE_4X 0x4
+/* AGP 3.0 only bits */
+#define AGPSTAT_ARQSZ_MASK (0x7 << 13) /* target only */
+#define AGPSTAT_CAL_MASK (0x7 << 10)
+#define AGPSTAT_GART64B (0x1 << 7) /* target only */
+#define AGPSTAT_MODE3 (0x1 << 3)
+/* Rate for 3.0 mode */
+#define AGP3_RATE_4X 0x1
+#define AGP3_RATE_8X 0x2
+
+/* AGP command register bits definition */
+#define AGPCMD_RQ_MASK 0xff000000 /* master only */
+#define AGPCMD_SBAEN (0x1 << 9) /* must be 1 for 3.0 */
+#define AGPCMD_AGPEN (0x1 << 8)
+#define AGPCMD_OVER4GEN (0x1 << 5)
+#define AGPCMD_FWEN (0x1 << 4)
+#define AGPCMD_RATE_MASK 0x7
+/* AGP 3.0 only bits */
+#define AGP3_CMD_ARQSZ_MASK (0x7 << 13) /* master only */
+#define AGP3_CMD_CAL_MASK (0x7 << 10) /* target only */
+#define AGP3_CMD_GART64BEN (0x1 << 7) /* target only */
+
+#define AGP_DEVICE "/dev/agpgart"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AGPGART_H */
diff --git a/usr/src/uts/common/sys/gfx_private.h b/usr/src/uts/common/sys/gfx_private.h
new file mode 100644
index 0000000..5c85d89
--- /dev/null
+++ b/usr/src/uts/common/sys/gfx_private.h
@@ -0,0 +1,103 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _GFX_PRIVATE_H
+#define _GFX_PRIVATE_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Memory cache attributes */
+#define GFXP_MEMORY_CACHED 0
+#define GFXP_MEMORY_UNCACHED 1
+#define GFXP_MEMORY_WRITECOMBINED 2
+
+typedef uint64_t gfx_maddr_t;
+
+extern int gfxp_ddi_segmap_setup(dev_t dev, off_t offset, struct as *as,
+ caddr_t *addrp, off_t len, uint_t prot, uint_t maxprot, uint_t flags,
+ cred_t *cred, ddi_device_acc_attr_t *accattrp, uint_t rnumber);
+
+extern ddi_umem_cookie_t gfxp_umem_cookie_init(caddr_t kva, size_t size);
+extern void gfxp_umem_cookie_destroy(ddi_umem_cookie_t cookie);
+extern int gfxp_devmap_umem_setup(devmap_cookie_t dhc, dev_info_t *dip,
+ struct devmap_callback_ctl *callbackops, ddi_umem_cookie_t cookie,
+ offset_t off, size_t len, uint_t maxprot, uint_t flags,
+ ddi_device_acc_attr_t *accattrp);
+extern void gfxp_map_devmem(devmap_cookie_t dhc, gfx_maddr_t maddr,
+ size_t length, ddi_device_acc_attr_t *attrp);
+
+
+typedef char *gfxp_acc_handle_t;
+extern gfxp_acc_handle_t gfxp_pci_init_handle(uint8_t bus, uint8_t slot,
+ uint8_t function, uint16_t *vendor, uint16_t *device);
+extern uint8_t gfxp_pci_read_byte(gfxp_acc_handle_t handle, uint16_t offset);
+extern uint16_t gfxp_pci_read_word(gfxp_acc_handle_t handle, uint16_t offset);
+extern uint32_t gfxp_pci_read_dword(gfxp_acc_handle_t handle, uint16_t offset);
+extern void gfxp_pci_write_byte(gfxp_acc_handle_t handle, uint16_t offset,
+ uint8_t value);
+extern void gfxp_pci_write_word(gfxp_acc_handle_t handle, uint16_t offset,
+ uint16_t value);
+extern void gfxp_pci_write_dword(gfxp_acc_handle_t handle, uint16_t offset,
+ uint32_t value);
+extern int gfxp_pci_device_present(uint16_t vendor, uint16_t device);
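+
+/*
+ * Illustrative sketch (an editor's addition): probing a device's config
+ * space through the accessors above. The bus/slot/function triple 0/2/0
+ * is only an example; 0x04 is the standard PCI command register offset.
+ *
+ *	uint16_t vendor, device;
+ *	gfxp_acc_handle_t hdl = gfxp_pci_init_handle(0, 2, 0,
+ *	    &vendor, &device);
+ *	uint16_t cmd = gfxp_pci_read_word(hdl, 0x04);
+ */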
+
+typedef char *gfxp_kva_t;
+extern gfxp_kva_t gfxp_map_kernel_space(uint64_t start, size_t size,
+ uint32_t mode);
+extern void gfxp_unmap_kernel_space(gfxp_kva_t address, size_t size);
+extern int gfxp_va2pa(struct as *as, caddr_t addr, uint64_t *pa);
+extern void gfxp_fix_mem_cache_attrs(caddr_t kva_start, size_t length,
+ int cache_attr);
+extern gfx_maddr_t gfxp_convert_addr(paddr_t paddr);
+
+typedef char *gfxp_vgatext_softc_ptr_t;
+
+extern gfxp_vgatext_softc_ptr_t gfxp_vgatext_softc_alloc(void);
+extern void gfxp_vgatext_softc_free(gfxp_vgatext_softc_ptr_t ptr);
+extern int gfxp_vgatext_attach(dev_info_t *devi, ddi_attach_cmd_t cmd,
+ gfxp_vgatext_softc_ptr_t ptr);
+extern int gfxp_vgatext_detach(dev_info_t *devi, ddi_detach_cmd_t cmd,
+ gfxp_vgatext_softc_ptr_t ptr);
+extern int gfxp_vgatext_open(dev_t *devp, int flag, int otyp, cred_t *cred,
+ gfxp_vgatext_softc_ptr_t ptr);
+extern int gfxp_vgatext_close(dev_t devp, int flag, int otyp, cred_t *cred,
+ gfxp_vgatext_softc_ptr_t ptr);
+extern int gfxp_vgatext_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
+ cred_t *cred, int *rval, gfxp_vgatext_softc_ptr_t ptr);
+
+extern int gfxp_mlock_user_memory(caddr_t address, size_t length);
+extern int gfxp_munlock_user_memory(caddr_t address, size_t length);
+extern int gfxp_vgatext_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off,
+ size_t len, size_t *maplen, uint_t model, void *ptr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _GFX_PRIVATE_H */
diff --git a/usr/src/uts/intel/agpgart/Makefile b/usr/src/uts/intel/agpgart/Makefile
new file mode 100644
index 0000000..e80b1db
--- /dev/null
+++ b/usr/src/uts/intel/agpgart/Makefile
@@ -0,0 +1,73 @@
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# uts/intel/agpgart/Makefile
+#
+
+#
+# This makefile drives the production of the agp protocol
+# framework (agpgart) kernel module.
+#
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = agpgart
+OBJECTS = $(AGPGART_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(AGPGART_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/intel/io/agpgart
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(SRC_CONFILE)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+CERRWARN += -_gcc=-Wno-switch
+CERRWARN += -_gcc=-Wno-parentheses
+CERRWARN += -_gcc=-Wno-uninitialized
+
+#
+# For now, disable these lint checks; maintainers should endeavor
+# to investigate and remove these for maximum lint coverage.
+# Please do not carry these forward to new Makefiles.
+#
+LINTTAGS += -erroff=E_BAD_PTR_CAST_ALIGN
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/agpmaster/Makefile b/usr/src/uts/intel/agpmaster/Makefile
new file mode 100644
index 0000000..c6a25d3
--- /dev/null
+++ b/usr/src/uts/intel/agpmaster/Makefile
@@ -0,0 +1,97 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# uts/intel/agpmaster/Makefile
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the common graphics
+# interface kernel module.
+#
+# intel platform dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = agpmaster
+OBJECTS = $(AGP_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(AGP_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_MISC_DIR)/$(MODULE)
+INC_PATH += -I$(UTSBASE)/intel/io/agpmaster
+
+#
+# dependency
+#
+LDFLAGS += -dy -Nmisc/pci_autoconfig
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# For now, disable these lint checks; maintainers should endeavor
+# to investigate and remove these for maximum lint coverage.
+# Please do not carry these forward to new Makefiles.
+#
+LINTTAGS += -erroff=E_BAD_PTR_CAST_ALIGN
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/agptarget/Makefile b/usr/src/uts/intel/agptarget/Makefile
new file mode 100644
index 0000000..8cf479a
--- /dev/null
+++ b/usr/src/uts/intel/agptarget/Makefile
@@ -0,0 +1,67 @@
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# uts/intel/agptarget/Makefile
+#
+#
+# This makefile drives the production of the agp protocol
+# (agptarget) kernel module.
+#
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = agptarget
+OBJECTS = $(AGPTARGET_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(AGPTARGET_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+
+#
+# dependency
+#
+LDFLAGS += -dy -Nmisc/busra
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+CERRWARN += -_gcc=-Wno-uninitialized
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/amd64_gart/Makefile b/usr/src/uts/intel/amd64_gart/Makefile
new file mode 100644
index 0000000..f2a2200
--- /dev/null
+++ b/usr/src/uts/intel/amd64_gart/Makefile
@@ -0,0 +1,62 @@
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# uts/intel/amd64_gart/Makefile
+#
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the agp protocol
+# (amd64_gart) kernel module.
+#
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = amd64_gart
+OBJECTS = $(AMD64GART_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(AMD64GART_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/drm/Makefile b/usr/src/uts/intel/drm/Makefile
new file mode 100644
index 0000000..1e48127
--- /dev/null
+++ b/usr/src/uts/intel/drm/Makefile
@@ -0,0 +1,90 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# uts/intel/drm/Makefile
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# This makefile drives the production of the DRM (Direct Rendering
+# Manager) common misc module.
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = drm
+OBJECTS = $(DRM_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(DRM_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_MISC_DIR)/$(MODULE)
+DRM_SRC = $(UTSBASE)/i86pc/io/drm
+GFX_DIR = $(UTSBASE)/i86pc/io/gfx_private
+
+INC_PATH += -I$(DRM_SRC) -I$(GFX_DIR)
+
+# Dependency
+LDFLAGS += -dy -Nmisc/agpmaster -Nmisc/gfx_private
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+CERRWARN += -_gcc=-Wno-parentheses
+CERRWARN += -_gcc=-Wno-uninitialized
+CERRWARN += -_gcc=-Wno-unused-variable
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/i915/Makefile b/usr/src/uts/intel/i915/Makefile
new file mode 100644
index 0000000..e0c2a30
--- /dev/null
+++ b/usr/src/uts/intel/i915/Makefile
@@ -0,0 +1,93 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# uts/intel/i915/Makefile
+#
+# This makefile drives the production of the i915 graphics device driver,
+# which supports the DRI (Direct Rendering Infrastructure) with the help
+# of the drm common misc module.
+#
+# intel platform dependent
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = i915
+OBJECTS = $(I915_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(I915_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+
+# i915 driver depends on drm, agpmaster and gfx_private misc modules
+INC_PATH += -I$(UTSBASE)/intel/io/drm -I$(UTSBASE)/common/io/drm
+
+#
+# dependency
+LDFLAGS += -dy -Nmisc/drm
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+CERRWARN += -_gcc=-Wno-unused-label
+
+#
+# Re-define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/io/agpgart/agp_kstat.c b/usr/src/uts/intel/io/agpgart/agp_kstat.c
new file mode 100644
index 0000000..1913fb8
--- /dev/null
+++ b/usr/src/uts/intel/io/agpgart/agp_kstat.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/kstat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpgart_impl.h>
+
+/*
+ * The values of type agp_arc_type_t are used as indices into arc_name,
+ * so if agp_arc_type_t's values are changed in the future, the content
+ * of arc_name must be changed accordingly.
+ */
+static const char *arc_name[] = {
+ "IGD_810",
+ "IGD_830",
+ "INTEL_AGP",
+ "AMD64_AGP",
+ "AMD64_NONAGP",
+ "UNKNOWN"
+};
+
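+/*
+ * Name-prefix convention, decoded in agp_init_kstats() below: a leading
+ * '$' marks a KSTAT_DATA_UINT64 statistic and a leading '&' a
+ * KSTAT_DATA_CHAR one; unprefixed names default to KSTAT_DATA_UINT32.
+ * The prefix character is stripped before the kstat is named.
+ */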
+static char *agpkstat_name[] = {
+ "&arc_type",
+ "master_dev_id",
+ "master_dev_version",
+ "master_dev_status",
+ "$prealloc_size",
+ "target_dev_id",
+ "target_dev_version",
+ "target_dev_status",
+ "$aper_base",
+ "$aper_size",
+ "&agp_enabled",
+ "agp_mode_set",
+ "$aper_used",
+ NULL
+};
+
+static void
+agp_set_char_kstat(kstat_named_t *knp, const char *s)
+{
+ (void) strlcpy(knp->value.c, s, sizeof (knp->value.c));
+}
+
+static int
+agp_kstat_update(kstat_t *ksp, int flag)
+{
+ agpgart_softstate_t *sc;
+ kstat_named_t *knp;
+ int tmp;
+
+ if (flag != KSTAT_READ)
+ return (EACCES);
+
+ sc = ksp->ks_private;
+ knp = ksp->ks_data;
+
+ agp_set_char_kstat(knp++, arc_name[sc->asoft_devreg.agprd_arctype]);
+ (knp++)->value.ui32 = sc->asoft_info.agpki_mdevid;
+ (knp++)->value.ui32 = (sc->asoft_info.agpki_mver.agpv_major<<16) |
+ sc->asoft_info.agpki_mver.agpv_minor;
+ (knp++)->value.ui32 = sc->asoft_info.agpki_mstatus;
+ (knp++)->value.ui64 = (sc->asoft_info.agpki_presize << 10) & UI32_MASK;
+ (knp++)->value.ui32 = sc->asoft_info.agpki_tdevid;
+ (knp++)->value.ui32 = (sc->asoft_info.agpki_tver.agpv_major<<16) |
+ sc->asoft_info.agpki_tver.agpv_minor;
+ (knp++)->value.ui32 = sc->asoft_info.agpki_tstatus;
+ (knp++)->value.ui64 = sc->asoft_info.agpki_aperbase;
+ (knp++)->value.ui64 =
+ (sc->asoft_info.agpki_apersize << 20) & UI32_MASK;
+
+ tmp = sc->asoft_agpen;
+ agp_set_char_kstat(knp++, (tmp > 0) ? "yes" : "no");
+
+ (knp++)->value.ui32 = sc->asoft_mode;
+ (knp++)->value.ui64 = (sc->asoft_pgused << 12) & UI32_MASK;
+
+ return (0);
+}
+
+int
+agp_init_kstats(agpgart_softstate_t *sc)
+{
+ int instance;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ char *np;
+ int type;
+ char **aknp;
+
+ instance = ddi_get_instance(sc->asoft_dip);
+ aknp = agpkstat_name;
+ ksp = kstat_create(AGPGART_DEVNODE, instance, "agpinfo", "agp",
+ KSTAT_TYPE_NAMED, sizeof (agpkstat_name)/sizeof (char *) - 1,
+ KSTAT_FLAG_PERSISTENT);
+ if (ksp == NULL)
+		return (-1);
+
+ ksp->ks_private = sc;
+ ksp->ks_update = agp_kstat_update;
+ for (knp = ksp->ks_data; (np = (*aknp)) != NULL; knp++, aknp++) {
+ switch (*np) {
+ case '$':
+ np += 1;
+ type = KSTAT_DATA_UINT64;
+ break;
+ case '&':
+ np += 1;
+ type = KSTAT_DATA_CHAR;
+ break;
+ default:
+ type = KSTAT_DATA_UINT32;
+ break;
+
+ }
+ kstat_named_init(knp, np, type);
+ }
+ kstat_install(ksp);
+
+ sc->asoft_ksp = ksp;
+
+ return (0);
+}
+
+void
+agp_fini_kstats(agpgart_softstate_t *sc)
+{
+ ASSERT(sc->asoft_ksp);
+ kstat_delete(sc->asoft_ksp);
+}
diff --git a/usr/src/uts/intel/io/agpgart/agpgart.c b/usr/src/uts/intel/io/agpgart/agpgart.c
new file mode 100644
index 0000000..7df0518
--- /dev/null
+++ b/usr/src/uts/intel/io/agpgart/agpgart.c
@@ -0,0 +1,3529 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Portions Philip Brown phil@bolthole.com Dec 2001
+ */
+
+
+/*
+ * agpgart driver
+ *
+ * This driver is primarily targeted at providing memory support for Intel
+ * AGP devices, Intel memoryless video cards, and AMD64 CPU GART devices.
+ * There are thus four main architectures known to the agpgart driver:
+ * ARC_IGD810, ARC_IGD830, ARC_INTELAGP and ARC_AMD64AGP. The memory
+ * interfaces are the same for all of them; the difference is in how the
+ * hardware GART table is managed for each architecture.
+ *
+ * For large memory allocations, this driver maps memory directly into the
+ * userland application's address space to save kernel virtual memory.
+ */
+
+#include <sys/types.h>
+#include <sys/pci.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/kstat.h>
+#include <sys/stat.h>
+#include <sys/modctl.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/policy.h>
+#include <sys/ddidevmap.h>
+#include <vm/seg_dev.h>
+#include <sys/pmem.h>
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpgart_impl.h>
+#include <sys/agp/agpamd64gart_io.h>
+#include <sys/agp/agpmaster_io.h>
+#include <sys/agp/agptarget_io.h>
+
+/* Dynamic debug support */
+int agp_debug_var = 0;
+#define AGPDB_PRINT1(fmt) if (agp_debug_var == 1) cmn_err fmt
+#define AGPDB_PRINT2(fmt) if (agp_debug_var >= 1) cmn_err fmt
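+
+/*
+ * Call sites wrap the argument list in double parentheses so that a
+ * cmn_err()-style variable argument list can pass through the single
+ * macro parameter, e.g.
+ *	AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err"));
+ */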
+
+/* Driver global softstate handle */
+static void *agpgart_glob_soft_handle;
+
+#define MAX_INSTNUM 16
+
+#define AGP_DEV2INST(devt) (getminor((devt)) >> 4)
+#define AGP_INST2MINOR(instance) ((instance) << 4)
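+/*
+ * For example, with the 4-bit split above, minor number 0x13 belongs to
+ * instance 1, and instance 2 owns minor numbers 0x20 through 0x2f.
+ */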
+#define IS_INTEL_830(type) ((type) == ARC_IGD830)
+#define IS_TRUE_AGP(type) (((type) == ARC_INTELAGP) || \
+ ((type) == ARC_AMD64AGP))
+
+#define AGP_HASH_NODE 1024
+
+static void
+list_head_init(struct list_head *head)
+{
+ struct list_head *entry, *tmp;
+ /* HASH for accelerate */
+ entry = kmem_zalloc(AGP_HASH_NODE *
+ sizeof (struct list_head), KM_SLEEP);
+ head->next = entry;
+ for (int i = 0; i < AGP_HASH_NODE; i++) {
+ tmp = &entry[i];
+ tmp->next = tmp;
+ tmp->prev = tmp;
+ tmp->gttseg = NULL;
+ }
+}
+
+static void
+list_head_add_new(struct list_head *head,
+ igd_gtt_seg_t *gttseg)
+{
+ struct list_head *entry, *tmp;
+ int key;
+ entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
+ key = gttseg->igs_pgstart % AGP_HASH_NODE;
+ tmp = &head->next[key];
+ tmp->next->prev = entry;
+ entry->next = tmp->next;
+ entry->prev = tmp;
+ tmp->next = entry;
+ entry->gttseg = gttseg;
+}
+
+static void
+list_head_del(struct list_head *entry)
+{
+	entry->next->prev = entry->prev;
+	entry->prev->next = entry->next;
+	entry->gttseg = NULL;
+}
+
+#define list_head_for_each_safe(entry, temp, head) \
+ for (int key = 0; key < AGP_HASH_NODE; key++) \
+ for (entry = (&(head)->next[key])->next, temp = (entry)->next; \
+ entry != &(head)->next[key]; \
+ entry = temp, temp = temp->next)
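+
+/*
+ * Lookup sketch over the hash buckets above (an editor's addition, not
+ * part of the original driver, which open-codes this pattern with
+ * list_head_for_each_safe where needed): find the entry added for a
+ * given aperture page offset.
+ */
+static igd_gtt_seg_t *
+list_head_find(struct list_head *head, uint32_t pgstart)
+{
+	struct list_head *entry;
+	int key = pgstart % AGP_HASH_NODE;
+
+	for (entry = head->next[key].next; entry != &head->next[key];
+	    entry = entry->next) {
+		if (entry->gttseg->igs_pgstart == pgstart)
+			return (entry->gttseg);
+	}
+	return (NULL);
+}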
+
+
+#define agpinfo_default_to_32(v, v32) \
+ { \
+ (v32).agpi32_version = (v).agpi_version; \
+ (v32).agpi32_devid = (v).agpi_devid; \
+ (v32).agpi32_mode = (v).agpi_mode; \
+ (v32).agpi32_aperbase = (uint32_t)(v).agpi_aperbase; \
+ (v32).agpi32_apersize = (uint32_t)(v).agpi_apersize; \
+ (v32).agpi32_pgtotal = (v).agpi_pgtotal; \
+ (v32).agpi32_pgsystem = (v).agpi_pgsystem; \
+ (v32).agpi32_pgused = (v).agpi_pgused; \
+ }
+
+static ddi_dma_attr_t agpgart_dma_attr = {
+ DMA_ATTR_V0,
+ 0U, /* dma_attr_addr_lo */
+ 0xffffffffU, /* dma_attr_addr_hi */
+ 0xffffffffU, /* dma_attr_count_max */
+ (uint64_t)AGP_PAGE_SIZE, /* dma_attr_align */
+ 1, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffU, /* dma_attr_maxxfer */
+ 0xffffffffU, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen, variable */
+ 4, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+/*
+ * AMD64 supports gart table above 4G. See alloc_gart_table.
+ */
+static ddi_dma_attr_t garttable_dma_attr = {
+ DMA_ATTR_V0,
+ 0U, /* dma_attr_addr_lo */
+ 0xffffffffU, /* dma_attr_addr_hi */
+ 0xffffffffU, /* dma_attr_count_max */
+ (uint64_t)AGP_PAGE_SIZE, /* dma_attr_align */
+ 1, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffU, /* dma_attr_maxxfer */
+ 0xffffffffU, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen, variable */
+ 4, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+/*
+ * The AGPGART table needs physically contiguous memory. To ensure that
+ * each access to the gart table is strongly ordered and uncacheable,
+ * we use DDI_STRICTORDER_ACC.
+ */
+static ddi_device_acc_attr_t gart_dev_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC /* must be DDI_STRICTORDER_ACC */
+};
+
+/*
+ * AGP memory is usually used as texture memory or for a framebuffer, so we
+ * can set the memory attribute to write combining. Video drivers will
+ * determine the frame buffer attributes, for example whether the memory
+ * is write combining or non-cacheable. However, the interface between
+ * Xorg and the agpgart driver to support attribute selection doesn't
+ * exist yet, so we set agp memory to non-cacheable by default for now.
+ * This attribute might be overridden by the MTRRs on x86.
+ */
+static ddi_device_acc_attr_t mem_dev_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC /* Can be DDI_MERGING_OK_ACC */
+};
+
+static keytable_ent_t *
+agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset);
+static void
+amd64_gart_unregister(amd64_garts_dev_t *cpu_garts);
+
+
+static void
+agp_devmap_unmap(devmap_cookie_t handle, void *devprivate,
+ offset_t off, size_t len, devmap_cookie_t new_handle1,
+ void **new_devprivate1, devmap_cookie_t new_handle2,
+ void **new_devprivate2)
+{
+
+ struct keytable_ent *mementry;
+ agpgart_softstate_t *softstate;
+ agpgart_ctx_t *ctxp, *newctxp1, *newctxp2;
+
+ ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off));
+ ASSERT(devprivate);
+ ASSERT(handle);
+
+ ctxp = (agpgart_ctx_t *)devprivate;
+ softstate = ctxp->actx_sc;
+ ASSERT(softstate);
+
+ if (new_handle1 != NULL) {
+ newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
+ newctxp1->actx_sc = softstate;
+ newctxp1->actx_off = ctxp->actx_off;
+ *new_devprivate1 = newctxp1;
+ }
+
+ if (new_handle2 != NULL) {
+ newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
+ newctxp2->actx_sc = softstate;
+ newctxp2->actx_off = off + len;
+ *new_devprivate2 = newctxp2;
+ }
+
+ mutex_enter(&softstate->asoft_instmutex);
+ if ((new_handle1 == NULL) && (new_handle2 == NULL)) {
+ mementry =
+ agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
+ ASSERT(mementry);
+		mementry->kte_refcnt--;
+		ASSERT(mementry->kte_refcnt >= 0);
+ } else if ((new_handle1 != NULL) && (new_handle2 != NULL)) {
+ mementry =
+ agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
+ ASSERT(mementry);
+		mementry->kte_refcnt++;
+		ASSERT(mementry->kte_refcnt >= 0);
+	}
+ mutex_exit(&softstate->asoft_instmutex);
+ kmem_free(ctxp, sizeof (struct agpgart_ctx));
+}
+
+/*ARGSUSED*/
+static int
+agp_devmap_map(devmap_cookie_t handle, dev_t dev,
+ uint_t flags, offset_t offset, size_t len, void **new_devprivate)
+{
+ agpgart_softstate_t *softstate;
+ int instance;
+ struct keytable_ent *mementry;
+ agpgart_ctx_t *newctxp;
+
+ ASSERT(handle);
+ instance = AGP_DEV2INST(dev);
+ softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+ if (softstate == NULL) {
+ AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err"));
+ return (ENXIO);
+ }
+
+ ASSERT(softstate);
+ ASSERT(mutex_owned(&softstate->asoft_instmutex));
+ ASSERT(len);
+ ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len));
+
+ mementry =
+ agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
+ ASSERT(mementry);
+ mementry->kte_refcnt++;
+ ASSERT(mementry->kte_refcnt >= 0);
+ newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
+ newctxp->actx_off = offset;
+ newctxp->actx_sc = softstate;
+ *new_devprivate = newctxp;
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate,
+ devmap_cookie_t new_handle, void **new_devprivate)
+{
+ struct keytable_ent *mementry;
+ agpgart_ctx_t *newctxp, *ctxp;
+ agpgart_softstate_t *softstate;
+
+ ASSERT(devprivate);
+ ASSERT(handle && new_handle);
+
+ ctxp = (agpgart_ctx_t *)devprivate;
+ ASSERT(AGP_ALIGNED(ctxp->actx_off));
+
+ newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
+ newctxp->actx_off = ctxp->actx_off;
+ newctxp->actx_sc = ctxp->actx_sc;
+ softstate = (agpgart_softstate_t *)newctxp->actx_sc;
+
+ mutex_enter(&softstate->asoft_instmutex);
+ mementry = agp_find_bound_keyent(softstate,
+ AGP_BYTES2PAGES(newctxp->actx_off));
+ mementry->kte_refcnt++;
+ ASSERT(mementry->kte_refcnt >= 0);
+ mutex_exit(&softstate->asoft_instmutex);
+ *new_devprivate = newctxp;
+
+ return (0);
+}
+
+struct devmap_callback_ctl agp_devmap_cb = {
+ DEVMAP_OPS_REV, /* rev */
+ agp_devmap_map, /* map */
+ NULL, /* access */
+ agp_devmap_dup, /* dup */
+ agp_devmap_unmap, /* unmap */
+};
+
+/*
+ * agp_master_regis_byname()
+ *
+ * Description:
+ * Open the AGP master device node by device path name and
+ * register the device handle for later operations.
+ * We check all possible driver instances from 0
+ * to MAX_INSTNUM because the master device could be
+ * at any instance number. Only one AGP master is supported.
+ *
+ * Arguments:
+ * master_hdlp AGP master device LDI handle pointer
+ * agpgart_li AGPGART driver LDI identifier
+ *
+ * Returns:
+ * -1 failed
+ * 0 success
+ */
+static int
+agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li)
+{
+ int i;
+ char buf[MAXPATHLEN];
+
+ ASSERT(master_hdlp);
+ ASSERT(agpgart_li);
+
+ /*
+ * Search all possible instance numbers for the agp master device.
+ * Only one master device is supported now, so the search ends
+ * when one master device is found.
+ */
+ for (i = 0; i < MAX_INSTNUM; i++) {
+ (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i);
+ if ((ldi_open_by_name(buf, 0, kcred,
+ master_hdlp, agpgart_li)))
+ continue;
+ AGPDB_PRINT1((CE_NOTE,
+ "master device found: instance number=%d", i));
+ break;
+
+ }
+
+ /* AGP master device not found */
+ if (i == MAX_INSTNUM)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * agp_target_regis_byname()
+ *
+ * Description:
+ * This function opens agp bridge device node by
+ * device path name and registers the device handle
+ * for later operations.
+ * We check driver instances from 0 to MAX_INSTNUM
+ * because the target device could be at any instance
+ * number. Only one agp target is supported.
+ *
+ *
+ * Arguments:
+ * target_hdlp AGP target device LDI handle pointer
+ * agpgart_li AGPGART driver LDI identifier
+ *
+ * Returns:
+ * -1 failed
+ * 0 success
+ */
+static int
+agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li)
+{
+ int i;
+ char buf[MAXPATHLEN];
+
+ ASSERT(target_hdlp);
+ ASSERT(agpgart_li);
+
+ for (i = 0; i < MAX_INSTNUM; i++) {
+ (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i);
+ if ((ldi_open_by_name(buf, 0, kcred,
+ target_hdlp, agpgart_li)))
+ continue;
+
+ AGPDB_PRINT1((CE_NOTE,
+ "bridge device found: instance number=%d", i));
+ break;
+
+ }
+
+ /* AGP bridge device not found */
+ if (i == MAX_INSTNUM) {
+ AGPDB_PRINT2((CE_WARN, "bridge device not found"));
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * amd64_gart_regis_byname()
+ *
+ * Description:
+ * Open all amd64 gart device nodes by device path name and
+ * register the device handles for later operations. Each cpu
+ * has its own amd64 gart device.
+ *
+ * Arguments:
+ * cpu_garts cpu garts device list header
+ * agpgart_li AGPGART driver LDI identifier
+ *
+ * Returns:
+ * -1 failed
+ * 0 success
+ */
+static int
+amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li)
+{
+ amd64_gart_dev_list_t *gart_list;
+ int i;
+ char buf[MAXPATHLEN];
+ ldi_handle_t gart_hdl;
+ int ret;
+
+ ASSERT(cpu_garts);
+ ASSERT(agpgart_li);
+
+ /*
+ * Search all possible instance numbers for the gart devices.
+ * There can be multiple on-cpu gart devices for Opteron server.
+ */
+ for (i = 0; i < MAX_INSTNUM; i++) {
+ (void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i);
+ ret = ldi_open_by_name(buf, 0, kcred,
+ &gart_hdl, agpgart_li);
+
+ if (ret == ENODEV)
+ continue;
+ else if (ret != 0) { /* There was an error opening the device */
+ amd64_gart_unregister(cpu_garts);
+ return (ret);
+ }
+
+ AGPDB_PRINT1((CE_NOTE,
+ "amd64 gart device found: instance number=%d", i));
+
+ gart_list = (amd64_gart_dev_list_t *)
+ kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP);
+
+ /* Add new item to the head of the gart device list */
+ gart_list->gart_devhdl = gart_hdl;
+ gart_list->next = cpu_garts->gart_dev_list_head;
+ cpu_garts->gart_dev_list_head = gart_list;
+ cpu_garts->gart_device_num++;
+ }
+
+ if (cpu_garts->gart_device_num == 0)
+ return (ENODEV);
+ return (0);
+}
+
+/*
+ * Unregister agp master device handle
+ */
+static void
+agp_master_unregister(ldi_handle_t *master_hdlp)
+{
+ ASSERT(master_hdlp);
+
+ if (master_hdlp) {
+ (void) ldi_close(*master_hdlp, 0, kcred);
+ *master_hdlp = NULL;
+ }
+}
+
+/*
+ * Unregister agp bridge device handle
+ */
+static void
+agp_target_unregister(ldi_handle_t *target_hdlp)
+{
+ if (target_hdlp) {
+ (void) ldi_close(*target_hdlp, 0, kcred);
+ *target_hdlp = NULL;
+ }
+}
+
+/*
+ * Unregister all amd64 gart device handles
+ */
+static void
+amd64_gart_unregister(amd64_garts_dev_t *cpu_garts)
+{
+ amd64_gart_dev_list_t *gart_list;
+ amd64_gart_dev_list_t *next;
+
+ ASSERT(cpu_garts);
+
+ for (gart_list = cpu_garts->gart_dev_list_head;
+ gart_list; gart_list = next) {
+
+ ASSERT(gart_list->gart_devhdl);
+ (void) ldi_close(gart_list->gart_devhdl, 0, kcred);
+ next = gart_list->next;
+ /* Free allocated memory */
+ kmem_free(gart_list, sizeof (amd64_gart_dev_list_t));
+ }
+ cpu_garts->gart_dev_list_head = NULL;
+ cpu_garts->gart_device_num = 0;
+}
+
+/*
+ * lyr_detect_master_type()
+ *
+ * Description:
+ * This function gets agp master type by querying agp master device.
+ *
+ * Arguments:
+ * master_hdlp agp master device ldi handle pointer
+ *
+ * Returns:
+ * -1 unsupported device
+ * DEVICE_IS_I810 i810 series
+ * DEVICE_IS_I810 i830 series
+ * DEVICE_IS_AGP true agp master
+ */
+static int
+lyr_detect_master_type(ldi_handle_t *master_hdlp)
+{
+ int vtype;
+ int err;
+
+ ASSERT(master_hdlp);
+
+ /* ldi_ioctl(agpmaster) */
+ err = ldi_ioctl(*master_hdlp, DEVICE_DETECT,
+ (intptr_t)&vtype, FKIOCTL, kcred, 0);
+ if (err) /* Unsupported graphics device */
+ return (-1);
+ return (vtype);
+}
+
+/*
+ * lyr_detect_target_type()
+ *
+ * Description:
+ * This function gets the host bridge chipset type by querying the agp
+ * target device.
+ *
+ * Arguments:
+ * target_hdlp agp target device LDI handle pointer
+ *
+ * Returns:
+ * CHIP_IS_INTEL Intel agp chipsets
+ * CHIP_IS_AMD AMD agp chipset
+ * -1 unsupported chipset
+ */
+static int
+lyr_detect_target_type(ldi_handle_t *target_hdlp)
+{
+ int btype;
+ int err;
+
+ ASSERT(target_hdlp);
+
+ err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype,
+ FKIOCTL, kcred, 0);
+ if (err) /* Unsupported bridge device */
+ return (-1);
+ return (btype);
+}
+
+/*
+ * lyr_init()
+ *
+ * Description:
+ * This function detects the graphics system architecture and
+ * registers all relevant device handles in a global structure
+ * "agp_regdev". Then it stores the system arc type in driver
+ * soft state.
+ *
+ * Arguments:
+ * agp_regdev AGP devices registration struct pointer
+ * agpgart_li AGPGART driver LDI identifier
+ *
+ * Returns:
+ * 0 System arc supported and agp device registration succeeded.
+ * -1 System arc not supported or device registration failed.
+ */
+int
+lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li)
+{
+ ldi_handle_t *master_hdlp;
+ ldi_handle_t *target_hdlp;
+ amd64_garts_dev_t *garts_dev;
+ int card_type, chip_type;
+ int ret;
+
+ ASSERT(agp_regdev);
+
+ bzero(agp_regdev, sizeof (agp_registered_dev_t));
+ agp_regdev->agprd_arctype = ARC_UNKNOWN;
+ /*
+ * Register agp devices, assuming all instances attached, and
+ * detect which agp architecture this server belongs to. This
+ * must be done before the agpgart driver starts to use layered
+ * driver interfaces.
+ */
+ master_hdlp = &agp_regdev->agprd_masterhdl;
+ target_hdlp = &agp_regdev->agprd_targethdl;
+ garts_dev = &agp_regdev->agprd_cpugarts;
+
+ /* Check whether the system is amd64 arc */
+ if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) {
+ /* No amd64 gart devices */
+ AGPDB_PRINT1((CE_NOTE,
+ "lyr_init: this is not an amd64 system"));
+ if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
+ AGPDB_PRINT2((CE_WARN,
+ "lyr_init: register master device unsuccessful"));
+ goto err1;
+ }
+ if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
+ AGPDB_PRINT2((CE_WARN,
+ "lyr_init: register target device unsuccessful"));
+ goto err2;
+ }
+ card_type = lyr_detect_master_type(master_hdlp);
+ /*
+ * Detect system arc by master device. If it is an Intel
+ * integrated device, finish the detection successfully.
+ */
+ switch (card_type) {
+		case DEVICE_IS_I810: /* i810-like graphics */
+ AGPDB_PRINT1((CE_NOTE,
+ "lyr_init: the system is Intel 810 arch"));
+ agp_regdev->agprd_arctype = ARC_IGD810;
+ return (0);
+		case DEVICE_IS_I830: /* i830-like graphics */
+ AGPDB_PRINT1((CE_NOTE,
+ "lyr_init: the system is Intel 830 arch"));
+ agp_regdev->agprd_arctype = ARC_IGD830;
+ return (0);
+ case DEVICE_IS_AGP: /* AGP graphics */
+ break;
+ default: /* Non IGD/AGP graphics */
+ AGPDB_PRINT2((CE_WARN,
+ "lyr_init: non-supported master device"));
+ goto err3;
+ }
+
+ chip_type = lyr_detect_target_type(target_hdlp);
+
+ /* Continue to detect AGP arc by target device */
+ switch (chip_type) {
+ case CHIP_IS_INTEL: /* Intel chipset */
+ AGPDB_PRINT1((CE_NOTE,
+ "lyr_init: Intel AGP arch detected"));
+ agp_regdev->agprd_arctype = ARC_INTELAGP;
+ return (0);
+ case CHIP_IS_AMD: /* AMD chipset */
+ AGPDB_PRINT2((CE_WARN,
+ "lyr_init: no cpu gart, but have AMD64 chipsets"));
+ goto err3;
+ default: /* Non supported chipset */
+ AGPDB_PRINT2((CE_WARN,
+ "lyr_init: detection can not continue"));
+ goto err3;
+ }
+
+ }
+
+ if (ret)
+ return (-1); /* Errors opening amd64 cpu gart devices */
+
+ /*
+ * An AMD64 cpu gart device exists, continue detection
+ */
+ if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
+ AGPDB_PRINT1((CE_NOTE, "lyr_init: no AGP master in amd64"));
+ goto err1;
+ }
+
+ if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
+ AGPDB_PRINT1((CE_NOTE,
+ "lyr_init: no AGP bridge"));
+ goto err2;
+ }
+
+ AGPDB_PRINT1((CE_NOTE,
+ "lyr_init: the system is AMD64 AGP architecture"));
+
+ agp_regdev->agprd_arctype = ARC_AMD64AGP;
+
+ return (0); /* Finished successfully */
+
+err3:
+ agp_target_unregister(&agp_regdev->agprd_targethdl);
+err2:
+ agp_master_unregister(&agp_regdev->agprd_masterhdl);
+err1:
+ /* AMD64 CPU gart registered ? */
+ if (ret == 0) {
+ amd64_gart_unregister(garts_dev);
+ }
+ agp_regdev->agprd_arctype = ARC_UNKNOWN;
+ return (-1);
+}
+
+void
+lyr_end(agp_registered_dev_t *agp_regdev)
+{
+ ASSERT(agp_regdev);
+
+ switch (agp_regdev->agprd_arctype) {
+ case ARC_IGD810:
+ case ARC_IGD830:
+ case ARC_INTELAGP:
+ agp_master_unregister(&agp_regdev->agprd_masterhdl);
+ agp_target_unregister(&agp_regdev->agprd_targethdl);
+
+ return;
+ case ARC_AMD64AGP:
+ agp_master_unregister(&agp_regdev->agprd_masterhdl);
+ agp_target_unregister(&agp_regdev->agprd_targethdl);
+ amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
+
+ return;
+ default:
+ ASSERT(0);
+ return;
+ }
+}
+
+int
+lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev)
+{
+ ldi_handle_t hdl;
+ igd_info_t value1;
+ i_agp_info_t value2;
+ size_t prealloc_size;
+ int err;
+
+ ASSERT(info);
+ ASSERT(agp_regdev);
+
+ switch (agp_regdev->agprd_arctype) {
+ case ARC_IGD810:
+ hdl = agp_regdev->agprd_masterhdl;
+ err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
+ FKIOCTL, kcred, 0);
+ if (err)
+ return (-1);
+ info->agpki_mdevid = value1.igd_devid;
+ info->agpki_aperbase = value1.igd_aperbase;
+ info->agpki_apersize = (uint32_t)value1.igd_apersize;
+
+ hdl = agp_regdev->agprd_targethdl;
+ err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
+ (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
+ if (err)
+ return (-1);
+ info->agpki_presize = prealloc_size;
+
+ break;
+
+ case ARC_IGD830:
+ hdl = agp_regdev->agprd_masterhdl;
+ err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
+ FKIOCTL, kcred, 0);
+ if (err)
+ return (-1);
+ info->agpki_mdevid = value1.igd_devid;
+ info->agpki_aperbase = value1.igd_aperbase;
+ info->agpki_apersize = (uint32_t)value1.igd_apersize;
+
+ hdl = agp_regdev->agprd_targethdl;
+ err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
+ (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
+ if (err)
+ return (-1);
+
+ /*
+ * Assume all units are kilobytes unless explicitly
+ * stated below:
+ * preallocated GTT memory = preallocated memory - GTT size
+ * - scratch page size
+ *
+ * scratch page size = 4
+ * GTT size (KB) = aperture size (MB)
+ * this algorithm came from Xorg source code
+ */
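+ /*
+ * Worked example (an editorial sketch, not from the original
+ * source): with a 128MB aperture and 8192KB of preallocated
+ * memory reported by the target, the usable preallocated GTT
+ * memory is 8192 - 128 (GTT size) - 4 (scratch page) = 8060KB.
+ */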
+ if (prealloc_size > (info->agpki_apersize + 4))
+ prealloc_size =
+ prealloc_size - info->agpki_apersize - 4;
+ else {
+ AGPDB_PRINT2((CE_WARN, "lyr_get_info: "
+ "pre-allocated memory too small, setting to zero"));
+ prealloc_size = 0;
+ }
+ info->agpki_presize = prealloc_size;
+ AGPDB_PRINT2((CE_NOTE,
+ "lyr_get_info: prealloc_size = %ldKB, apersize = %dMB",
+ prealloc_size, info->agpki_apersize));
+ break;
+ case ARC_INTELAGP:
+ case ARC_AMD64AGP:
+ /* AGP devices */
+ hdl = agp_regdev->agprd_masterhdl;
+ err = ldi_ioctl(hdl, AGP_MASTER_GETINFO,
+ (intptr_t)&value2, FKIOCTL, kcred, 0);
+ if (err)
+ return (-1);
+ info->agpki_mdevid = value2.iagp_devid;
+ info->agpki_mver = value2.iagp_ver;
+ info->agpki_mstatus = value2.iagp_mode;
+ hdl = agp_regdev->agprd_targethdl;
+ err = ldi_ioctl(hdl, AGP_TARGET_GETINFO,
+ (intptr_t)&value2, FKIOCTL, kcred, 0);
+ if (err)
+ return (-1);
+ info->agpki_tdevid = value2.iagp_devid;
+ info->agpki_tver = value2.iagp_ver;
+ info->agpki_tstatus = value2.iagp_mode;
+ info->agpki_aperbase = value2.iagp_aperbase;
+ info->agpki_apersize = (uint32_t)value2.iagp_apersize;
+ break;
+ default:
+ AGPDB_PRINT2((CE_WARN,
+ "lyr_get_info: function doesn't work for unknown arc"));
+ return (-1);
+ }
+ if ((info->agpki_apersize >= MAXAPERMEGAS) ||
+ (info->agpki_apersize == 0) ||
+ (info->agpki_aperbase == 0)) {
+ AGPDB_PRINT2((CE_WARN,
+ "lyr_get_info: aperture is not programmed correctly!"));
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * lyr_i8xx_add_to_gtt()
+ *
+ * Description:
+ * This function sets up the integrated video device gtt table
+ * via an ioclt to the AGP master driver.
+ *
+ * Arguments:
+ * pg_offset The start entry to be setup
+ * keyent Keytable entity pointer
+ * agp_regdev AGP devices registration struct pointer
+ *
+ * Returns:
+ * 0 success
+ * -1 invalid operations
+ */
+int
+lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent,
+ agp_registered_dev_t *agp_regdev)
+{
+ int err = 0;
+ int rval;
+ ldi_handle_t hdl;
+ igd_gtt_seg_t gttseg;
+ uint32_t *addrp, i;
+ uint32_t npages;
+
+ ASSERT(keyent);
+ ASSERT(agp_regdev);
+ gttseg.igs_pgstart = pg_offset;
+ npages = keyent->kte_pages;
+ gttseg.igs_npage = npages;
+ gttseg.igs_type = keyent->kte_type;
+ gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc
+ (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP);
+
+ addrp = gttseg.igs_phyaddr;
+ for (i = 0; i < npages; i++, addrp++) {
+ *addrp =
+ (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT);
+ }
+
+ hdl = agp_regdev->agprd_masterhdl;
+ if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)&gttseg, FKIOCTL,
+ kcred, &rval)) {
+ AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error"));
+ AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x",
+ gttseg.igs_pgstart));
+ AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x",
+ gttseg.igs_npage));
+ AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x",
+ gttseg.igs_type));
+ err = -1;
+ }
+ kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage);
+ return (err);
+}
+
+/*
+ * lyr_i8xx_remove_from_gtt()
+ *
+ * Description:
+ * This function clears the integrated video device gtt table via
+ * an ioctl to the agp master device.
+ *
+ * Arguments:
+ * pg_offset The starting entry to be cleared
+ * npage The number of entries to be cleared
+ * agp_regdev AGP devices struct pointer
+ *
+ * Returns:
+ * 0 success
+ * -1 invalid operations
+ */
+int
+lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage,
+ agp_registered_dev_t *agp_regdev)
+{
+ int rval;
+ ldi_handle_t hdl;
+ igd_gtt_seg_t gttseg;
+
+ gttseg.igs_pgstart = pg_offset;
+ gttseg.igs_npage = npage;
+
+ hdl = agp_regdev->agprd_masterhdl;
+ if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)&gttseg, FKIOCTL,
+ kcred, &rval))
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * lyr_set_gart_addr()
+ *
+ * Description:
+ * This function puts the gart table physical address in the
+ * gart base register.
+ * Please refer to gart and gtt table base register format for
+ * gart base register format in agpdefs.h.
+ *
+ * Arguments:
+ * phy_base The base physical address of gart table
+ * agp_regdev AGP devices registration struct pointer
+ *
+ * Returns:
+ * 0 success
+ * -1 failed
+ *
+ */
+
+int
+lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev)
+{
+ amd64_gart_dev_list_t *gart_list;
+ ldi_handle_t hdl;
+ int err = 0;
+
+ ASSERT(agp_regdev);
+ switch (agp_regdev->agprd_arctype) {
+ case ARC_IGD810:
+ {
+ uint32_t base;
+
+ ASSERT((phy_base & I810_POINTER_MASK) == 0);
+ base = (uint32_t)phy_base;
+
+ hdl = agp_regdev->agprd_masterhdl;
+ err = ldi_ioctl(hdl, I810_SET_GTT_BASE,
+ (intptr_t)&base, FKIOCTL, kcred, 0);
+ break;
+ }
+ case ARC_INTELAGP:
+ {
+ uint32_t addr;
+ addr = (uint32_t)phy_base;
+
+ ASSERT((phy_base & GTT_POINTER_MASK) == 0);
+ hdl = agp_regdev->agprd_targethdl;
+ err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR,
+ (intptr_t)&addr, FKIOCTL, kcred, 0);
+ break;
+ }
+ case ARC_AMD64AGP:
+ {
+ uint32_t addr;
+
+ ASSERT((phy_base & AMD64_POINTER_MASK) == 0);
+ addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT)
+ & AMD64_GARTBASE_MASK);
+
+ for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
+ gart_list;
+ gart_list = gart_list->next) {
+ hdl = gart_list->gart_devhdl;
+ if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR,
+ (intptr_t)&addr, FKIOCTL, kcred, 0)) {
+ err = -1;
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ err = -1;
+ }
+
+ if (err)
+ return (-1);
+
+ return (0);
+}
+
+int
+lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev)
+{
+ ldi_handle_t hdl;
+ uint32_t command;
+
+ ASSERT(agp_regdev);
+ command = cmd;
+ hdl = agp_regdev->agprd_targethdl;
+ if (ldi_ioctl(hdl, AGP_TARGET_SETCMD,
+ (intptr_t)&command, FKIOCTL, kcred, 0))
+ return (-1);
+ hdl = agp_regdev->agprd_masterhdl;
+ if (ldi_ioctl(hdl, AGP_MASTER_SETCMD,
+ (intptr_t)&command, FKIOCTL, kcred, 0))
+ return (-1);
+
+ return (0);
+}
+
+int
+lyr_config_devices(agp_registered_dev_t *agp_regdev)
+{
+ amd64_gart_dev_list_t *gart_list;
+ ldi_handle_t hdl;
+ int rc = 0;
+
+ ASSERT(agp_regdev);
+ switch (agp_regdev->agprd_arctype) {
+ case ARC_IGD830:
+ case ARC_IGD810:
+ break;
+ case ARC_INTELAGP:
+ {
+ hdl = agp_regdev->agprd_targethdl;
+ rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE,
+ 0, FKIOCTL, kcred, 0);
+ break;
+ }
+ case ARC_AMD64AGP:
+ {
+ /*
+ * The BIOS always shadows registers such as the Aperture Base
+ * and Aperture Size registers from the AGP bridge to the AMD64
+ * CPU host bridge. If future BIOSes are broken in this regard,
+ * we may need to shadow these registers in the driver.
+ */
+
+ for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
+ gart_list;
+ gart_list = gart_list->next) {
+ hdl = gart_list->gart_devhdl;
+ if (ldi_ioctl(hdl, AMD64_CONFIGURE,
+ 0, FKIOCTL, kcred, 0)) {
+ rc = -1;
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ rc = -1;
+ }
+
+ if (rc)
+ return (-1);
+
+ return (0);
+}
+
+int
+lyr_unconfig_devices(agp_registered_dev_t *agp_regdev)
+{
+ amd64_gart_dev_list_t *gart_list;
+ ldi_handle_t hdl;
+ int rc = 0;
+
+ ASSERT(agp_regdev);
+ switch (agp_regdev->agprd_arctype) {
+ case ARC_IGD830:
+ case ARC_IGD810:
+ {
+ hdl = agp_regdev->agprd_masterhdl;
+ rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0);
+ break;
+ }
+ case ARC_INTELAGP:
+ {
+ hdl = agp_regdev->agprd_targethdl;
+ rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG,
+ 0, FKIOCTL, kcred, 0);
+ break;
+ }
+ case ARC_AMD64AGP:
+ {
+ for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
+ gart_list; gart_list = gart_list->next) {
+ hdl = gart_list->gart_devhdl;
+ if (ldi_ioctl(hdl, AMD64_UNCONFIG,
+ 0, FKIOCTL, kcred, 0)) {
+ rc = -1;
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ rc = -1;
+ }
+
+ if (rc)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * lyr_flush_gart_cache()
+ *
+ * Description:
+ * This function flushes the GART translation look-aside buffer. All
+ * GART translation caches will be flushed after this operation.
+ *
+ * Arguments:
+ * agp_regdev AGP devices struct pointer
+ */
+void
+lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev)
+{
+ amd64_gart_dev_list_t *gart_list;
+ ldi_handle_t hdl;
+
+ ASSERT(agp_regdev);
+ if (agp_regdev->agprd_arctype == ARC_AMD64AGP) {
+ for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
+ gart_list; gart_list = gart_list->next) {
+ hdl = gart_list->gart_devhdl;
+ (void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB,
+ 0, FKIOCTL, kcred, 0);
+ }
+ } else if (agp_regdev->agprd_arctype == ARC_INTELAGP) {
+ hdl = agp_regdev->agprd_targethdl;
+ (void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0,
+ FKIOCTL, kcred, 0);
+ }
+}
+
+/*
+ * get_max_pages()
+ *
+ * Description:
+ * This function computes the total number of pages allowed for the
+ * agp aperture based on the amount of physical memory.
+ * The algorithm: compare the aperture size with 1/4 of the total
+ * physical pages, and use the smaller of the two as the maximum
+ * number of available pages, but never less than the 192MB minimum
+ * of video memory.
+ *
+ * Arguments:
+ * aper_size system agp aperture size (in MB)
+ *
+ * Returns:
+ * The max possible number of agp memory pages available to users
+ */
+static uint32_t
+get_max_pages(uint32_t aper_size)
+{
+ uint32_t i, j, size;
+
+ ASSERT(aper_size <= MAXAPERMEGAS);
+
+ i = AGP_MB2PAGES(aper_size);
+ j = (physmem >> 2);
+
+ size = ((i < j) ? i : j);
+
+ if (size < AGP_MB2PAGES(MINAPERMEGAS))
+ size = AGP_MB2PAGES(MINAPERMEGAS);
+ return (size);
+}
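+/*
+ * Worked example (an editorial sketch, assuming 4KB agp pages so that
+ * AGP_MB2PAGES(n) == n * 256): with a 256MB aperture and 2GB of
+ * physical memory (physmem == 524288 pages), i == 65536 and
+ * j == 131072, so get_max_pages() returns 65536 pages (256MB). On a
+ * machine with only 256MB of physical memory, j == 16384 would win,
+ * but the result is floored at AGP_MB2PAGES(MINAPERMEGAS).
+ */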
+
+/*
+ * agp_fill_empty_keyent()
+ *
+ * Description:
+ * This function finds an empty key table slot and
+ * fills it with a new entity.
+ *
+ * Arguments:
+ * softstate driver soft state pointer
+ * entryp new entity data pointer
+ *
+ * Returns:
+ * NULL no key table slot available
+ * entryp the new entity slot pointer
+ */
+static keytable_ent_t *
+agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
+{
+ int key;
+ keytable_ent_t *newentryp;
+
+ ASSERT(softstate);
+ ASSERT(entryp);
+ ASSERT(entryp->kte_memhdl);
+ ASSERT(entryp->kte_pfnarray);
+ ASSERT(mutex_owned(&softstate->asoft_instmutex));
+
+ for (key = 0; key < AGP_MAXKEYS; key++) {
+ newentryp = &softstate->asoft_table[key];
+ if (newentryp->kte_memhdl == NULL) {
+ break;
+ }
+ }
+
+ if (key >= AGP_MAXKEYS) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_fill_empty_keyent: key table exhausted"));
+ return (NULL);
+ }
+
+ ASSERT(newentryp->kte_pfnarray == NULL);
+ bcopy(entryp, newentryp, sizeof (keytable_ent_t));
+ newentryp->kte_key = key;
+
+ return (newentryp);
+}
+
+/*
+ * agp_find_bound_keyent()
+ *
+ * Description:
+ * This function finds the key table entity by agp aperture page offset.
+ * Every keytable entity will have an agp aperture range after the binding
+ * operation.
+ *
+ * Arguments:
+ * softstate driver soft state pointer
+ * pg_offset agp aperture page offset
+ *
+ * Returns:
+ * NULL no such keytable entity
+ * pointer key table entity pointer found
+ */
+static keytable_ent_t *
+agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset)
+{
+ int keycount;
+ keytable_ent_t *entryp;
+
+ ASSERT(softstate);
+ ASSERT(mutex_owned(&softstate->asoft_instmutex));
+
+ for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) {
+ entryp = &softstate->asoft_table[keycount];
+ if (entryp->kte_bound == 0) {
+ continue;
+ }
+
+ if (pg_offset < entryp->kte_pgoff)
+ continue;
+ if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages))
+ continue;
+
+ ASSERT(entryp->kte_memhdl);
+ ASSERT(entryp->kte_pfnarray);
+
+ return (entryp);
+ }
+
+ return (NULL);
+}
+
+/*
+ * agp_check_off()
+ *
+ * Description:
+ * This function checks whether an AGP aperture range to be bound
+ * overlaps with any AGP range already bound.
+ *
+ * Arguments:
+ * entryp key table start entry pointer
+ * pg_start AGP range start page offset
+ * pg_num number of pages to be bound
+ *
+ * Returns:
+ * 0 Does not overlap
+ * -1 Overlaps
+ */
+
+static int
+agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num)
+{
+ int key;
+ uint64_t pg_end;
+ uint64_t kpg_end;
+
+ ASSERT(entryp);
+
+ pg_end = pg_start + pg_num;
+ for (key = 0; key < AGP_MAXKEYS; key++) {
+ if (!entryp[key].kte_bound)
+ continue;
+
+ kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages;
+ if (!((pg_end <= entryp[key].kte_pgoff) ||
+ (pg_start >= kpg_end)))
+ break;
+ }
+
+ if (key == AGP_MAXKEYS)
+ return (0);
+ else
+ return (-1);
+}
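+/*
+ * Example (an editorial sketch): if a bound key occupies aperture pages
+ * [0x10, 0x20) and a new request asks for pg_start = 0x18 with
+ * pg_num = 0x10 (i.e. [0x18, 0x28)), neither "pg_end <= kte_pgoff" nor
+ * "pg_start >= kpg_end" holds, so the ranges overlap and -1 is
+ * returned.
+ */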
+
+static int
+is_controlling_proc(agpgart_softstate_t *st)
+{
+ ASSERT(st);
+
+ if (!st->asoft_acquired) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_setup: gart not acquired"));
+ return (-1);
+ }
+ if (st->asoft_curpid != ddi_get_pid()) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_release: not controlling process"));
+ return (-1);
+ }
+
+ return (0);
+}
+
+static void release_control(agpgart_softstate_t *st)
+{
+ st->asoft_curpid = 0;
+ st->asoft_acquired = 0;
+}
+
+static void acquire_control(agpgart_softstate_t *st)
+{
+ st->asoft_curpid = ddi_get_pid();
+ st->asoft_acquired = 1;
+}
+
+/*
+ * agp_remove_from_gart()
+ *
+ * Description:
+ * This function clears a range of gart table entries, tearing down
+ * the agp aperture page to physical memory page translation.
+ * Arguments:
+ * pg_offset Starting aperture page to be cleared
+ * entries the number of pages to be cleared
+ * dma_hdl GART table dma memory handle
+ * tablep GART table kernel virtual address
+ */
+static void
+agp_remove_from_gart(
+ uint32_t pg_offset,
+ uint32_t entries,
+ ddi_dma_handle_t dma_hdl,
+ uint32_t *tablep)
+{
+ uint32_t items = 0;
+ uint32_t *entryp;
+
+ entryp = tablep + pg_offset;
+ while (items < entries) {
+ *(entryp + items) = 0;
+ items++;
+ }
+ (void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
+ entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
+}
+
+/*
+ * agp_unbind_key()
+ *
+ * Description:
+ * This function unbinds AGP memory from the gart table. It will clear
+ * all the gart entries related to this agp memory.
+ *
+ * Arguments:
+ * softstate driver soft state pointer
+ * entryp key table entity pointer
+ *
+ * Returns:
+ * EINVAL invalid key table entity pointer
+ * 0 success
+ *
+ */
+static int
+agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
+{
+ int retval = 0;
+
+ ASSERT(entryp);
+ ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
+
+ if (!entryp->kte_bound) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_unbind_key: key = 0x%x, not bound",
+ entryp->kte_key));
+ return (EINVAL);
+ }
+ if (entryp->kte_refcnt) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_unbind_key: memory is exported to users"));
+ return (EINVAL);
+ }
+
+ ASSERT((entryp->kte_pgoff + entryp->kte_pages) <=
+ AGP_MB2PAGES(softstate->asoft_info.agpki_apersize));
+ ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN));
+
+ switch (softstate->asoft_devreg.agprd_arctype) {
+ case ARC_IGD810:
+ case ARC_IGD830:
+ retval = lyr_i8xx_remove_from_gtt(
+ entryp->kte_pgoff, entryp->kte_pages,
+ &softstate->asoft_devreg);
+ if (retval) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_unbind_key: Key = 0x%x, clear table error",
+ entryp->kte_key));
+ return (EIO);
+ }
+ break;
+ case ARC_INTELAGP:
+ case ARC_AMD64AGP:
+ agp_remove_from_gart(entryp->kte_pgoff,
+ entryp->kte_pages,
+ softstate->gart_dma_handle,
+ (uint32_t *)softstate->gart_vbase);
+ /* Flush GTLB table */
+ lyr_flush_gart_cache(&softstate->asoft_devreg);
+
+ break;
+ }
+
+ entryp->kte_bound = 0;
+
+ return (0);
+}
+
+/*
+ * agp_dealloc_kmem()
+ *
+ * Description:
+ * This function deallocates dma memory resources for userland
+ * applications.
+ *
+ * Arguments:
+ * entryp keytable entity pointer
+ */
+static void
+agp_dealloc_kmem(keytable_ent_t *entryp)
+{
+ kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
+ entryp->kte_pfnarray = NULL;
+
+ (void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle);
+ KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0;
+ ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl);
+ KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL;
+ KMEMP(entryp->kte_memhdl)->kmem_reallen = 0;
+ KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL;
+
+ ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle));
+ KMEMP(entryp->kte_memhdl)->kmem_handle = NULL;
+
+ kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t));
+ entryp->kte_memhdl = NULL;
+}
+
+/*
+ * agp_dealloc_mem()
+ *
+ * Description:
+ * This function deallocates physical memory resources allocated for
+ * userland applications.
+ *
+ * Arguments:
+ * st driver soft state pointer
+ * entryp key table entity pointer
+ *
+ * Returns:
+ * -1 not a valid memory type or the memory is mapped by
+ * userland applications
+ * 0 success
+ */
+static int
+agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t *entryp)
+{
+
+ ASSERT(entryp);
+ ASSERT(st);
+ ASSERT(entryp->kte_memhdl);
+ ASSERT(mutex_owned(&st->asoft_instmutex));
+
+ /* auto unbind here */
+ if (entryp->kte_bound && !entryp->kte_refcnt) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_dealloc_mem: key=0x%x, auto unbind",
+ entryp->kte_key));
+
+ /*
+ * agp_dealloc_mem may be called indirectly by agpgart_detach,
+ * by which time agpgart_close has already been called and the
+ * gart table freed. agp_unbind_key will panic if no valid gart
+ * table exists, so test whether the gart table exists here.
+ */
+ if (st->asoft_opened)
+ (void) agp_unbind_key(st, entryp);
+ }
+ if (entryp->kte_refcnt) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_dealloc_mem: memory is exported to users"));
+ return (-1);
+ }
+
+ switch (entryp->kte_type) {
+ case AGP_NORMAL:
+ case AGP_PHYSICAL:
+ agp_dealloc_kmem(entryp);
+ break;
+ default:
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * agp_del_allkeys()
+ *
+ * Description:
+ * This function calls agp_dealloc_mem to release all the agp memory
+ * resources allocated.
+ *
+ * Arguments:
+ * softstate driver soft state pointer
+ * Returns:
+ * -1 can not free all agp memory
+ * 0 success
+ *
+ */
+static int
+agp_del_allkeys(agpgart_softstate_t *softstate)
+{
+ int key;
+ int ret = 0;
+
+ ASSERT(softstate);
+ for (key = 0; key < AGP_MAXKEYS; key++) {
+ if (softstate->asoft_table[key].kte_memhdl != NULL) {
+ /*
+ * Check if we can free agp memory now.
+ * If agp memory is exported to user
+ * applications, agp_dealloc_mem will fail.
+ */
+ if (agp_dealloc_mem(softstate,
+ &softstate->asoft_table[key]))
+ ret = -1;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * pfn2gartentry()
+ *
+ * Description:
+ * This function converts a page frame number to a GART entry.
+ * For AMD64, the hardware only supports addresses below 40 bits
+ * (about 1TB of physical address space), so the largest pfn
+ * number is below 28 bits. Please refer to the GART and GTT entry
+ * format table in agpdefs.h for the entry format. Intel IGD
+ * only supports GTT entries below 1G. Intel AGP only supports
+ * GART entries below 4G.
+ *
+ * Arguments:
+ * arc_type system agp arc type
+ * pfn page frame number
+ * itemv the entry item to be returned
+ * Returns:
+ * -1 invalid page frame
+ * 0 conversion success
+ */
+static int
+pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv)
+{
+ uint64_t paddr;
+
+ paddr = (uint64_t)pfn << AGP_PAGE_SHIFT;
+ AGPDB_PRINT1((CE_NOTE, "checking pfn number %lu for type %d",
+ pfn, arc_type));
+
+ switch (arc_type) {
+ case ARC_INTELAGP:
+ {
+ /* Only support 32-bit hardware address */
+ if ((paddr & AGP_INTEL_POINTER_MASK) != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "INTEL AGP Hardware only support 32 bits"));
+ return (-1);
+ }
+ *itemv = (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID;
+
+ break;
+ }
+ case ARC_AMD64AGP:
+ {
+ uint32_t value1, value2;
+ /* Physaddr should not exceed 40-bit */
+ if ((paddr & AMD64_POINTER_MASK) != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "AMD64 GART hardware only supoort 40 bits"));
+ return (-1);
+ }
+ value1 = (uint32_t)pfn >> 20;
+ value1 <<= 4;
+ value2 = (uint32_t)pfn << 12;
+
+ *itemv = value1 | value2 | AMD64_ENTRY_VALID;
+ break;
+ }
+ case ARC_IGD810:
+ if ((paddr & I810_POINTER_MASK) != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "Intel i810 only support 30 bits"));
+ return (-1);
+ }
+ break;
+
+ case ARC_IGD830:
+ if ((paddr & GTT_POINTER_MASK) != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "Intel IGD only support 32 bits"));
+ return (-1);
+ }
+ break;
+ default:
+ AGPDB_PRINT2((CE_WARN,
+ "pfn2gartentry: arc type = %d, not support", arc_type));
+ return (-1);
+ }
+ return (0);
+}
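+/*
+ * Worked example of the AMD64 entry packing above (an editorial
+ * sketch): for pfn 0x1234567, value1 = (0x1234567 >> 20) << 4 = 0x120
+ * and value2 = (uint32_t)0x1234567 << 12 = 0x34567000, so the GART
+ * entry is 0x34567120 | AMD64_ENTRY_VALID: pfn bits 19:0 land in
+ * entry bits 31:12 and pfn bits 27:20 land in entry bits 11:4.
+ */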
+
+/*
+ * Check the validity of the allocated physical pages.
+ */
+static int
+agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items)
+{
+ int count;
+ uint32_t ret;
+
+ for (count = 0; count < items; count++) {
+ if (pfn2gartentry(arc_type, pfnarray[count], &ret))
+ break;
+ }
+ if (count < items)
+ return (-1);
+ else
+ return (0);
+}
+
+/*
+ * kmem_getpfns()
+ *
+ * Description:
+ * This function gets page frame numbers from dma handle.
+ *
+ * Arguments:
+ * dma_handle dma handle allocated by ddi_dma_alloc_handle
+ * dma_cookiep dma cookie pointer
+ * cookies_num cookies number
+ * pfnarray array to store page frames
+ *
+ * Returns:
+ * 0 success
+ */
+static int
+kmem_getpfns(
+ ddi_dma_handle_t dma_handle,
+ ddi_dma_cookie_t *dma_cookiep,
+ int cookies_num,
+ pfn_t *pfnarray)
+{
+ int num_cookies;
+ int index = 0;
+
+ num_cookies = cookies_num;
+
+ while (num_cookies > 0) {
+ uint64_t ck_startaddr, ck_length, ck_end;
+ ck_startaddr = dma_cookiep->dmac_address;
+ ck_length = dma_cookiep->dmac_size;
+
+ ck_end = ck_startaddr + ck_length;
+ while (ck_startaddr < ck_end) {
+ pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT;
+ ck_startaddr += AGP_PAGE_SIZE;
+ index++;
+ }
+
+ num_cookies--;
+ if (num_cookies > 0) {
+ ddi_dma_nextcookie(dma_handle, dma_cookiep);
+ }
+ }
+
+ return (0);
+}
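+/*
+ * Example (an editorial sketch, assuming AGP_PAGE_SIZE == 4KB): a
+ * single cookie with dmac_address 0x10000 and dmac_size 0x4000 yields
+ * the four page frame numbers 0x10, 0x11, 0x12 and 0x13 in pfnarray.
+ */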
+
+static int
+copyinfo(agpgart_softstate_t *softstate, agp_info_t *info)
+{
+ switch (softstate->asoft_devreg.agprd_arctype) {
+ case ARC_IGD810:
+ case ARC_IGD830:
+ info->agpi_version.agpv_major = 0;
+ info->agpi_version.agpv_minor = 0;
+ info->agpi_devid = softstate->asoft_info.agpki_mdevid;
+ info->agpi_mode = 0;
+ break;
+ case ARC_INTELAGP:
+ case ARC_AMD64AGP:
+ info->agpi_version = softstate->asoft_info.agpki_tver;
+ info->agpi_devid = softstate->asoft_info.agpki_tdevid;
+ info->agpi_mode = softstate->asoft_info.agpki_tstatus;
+ break;
+ default:
+ AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOW ARC"));
+ return (-1);
+ }
+ /*
+ * 64bit->32bit conversion possible
+ */
+ info->agpi_aperbase = softstate->asoft_info.agpki_aperbase;
+ info->agpi_apersize = softstate->asoft_info.agpki_apersize;
+ info->agpi_pgtotal = softstate->asoft_pgtotal;
+ info->agpi_pgsystem = info->agpi_pgtotal;
+ info->agpi_pgused = softstate->asoft_pgused;
+
+ return (0);
+}
+
+static uint32_t
+agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
+{
+ uint32_t cmd;
+ int rq, sba, over4g, fw, rate;
+
+ /*
+ * tstatus: target device status
+ * mstatus: master device status
+ * mode: the agp mode to be sent
+ */
+
+ /*
+ * RQ - Request Queue size
+ * Set RQ to the min of mode and tstatus.
+ * If mode requests an RQ larger than the hardware can support,
+ * use the max RQ the hardware can support;
+ * tstatus & AGPSTAT_RQ_MASK is that maximum.
+ * The corelogic will enqueue agp transactions.
+ */
+ rq = mode & AGPSTAT_RQ_MASK;
+ if ((tstatus & AGPSTAT_RQ_MASK) < rq)
+ rq = tstatus & AGPSTAT_RQ_MASK;
+
+ /*
+ * SBA - Sideband Addressing
+ *
+ * Sideband Addressing provides an additional bus to pass requests
+ * (address and command) to the target from the master.
+ *
+ * set SBA if all three support it
+ */
+ sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA)
+ & (mode & AGPSTAT_SBA);
+
+ /* set OVER4G if all three support it */
+ over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
+ & (mode & AGPSTAT_OVER4G);
+
+ /*
+ * FW - fast write
+ *
+ * acceleration of memory write transactions from the corelogic to the
+ * A.G.P. master device acting like a PCI target.
+ *
+ * set FW if all three support it
+ */
+ fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
+ & (mode & AGPSTAT_FW);
+
+ /*
+ * Figure out the max rate.
+ * AGP v2 supports 4X, 2X and 1X speeds.
+ * status bits  meaning
+ * ---------------------------------------------
+ * 31:4         others
+ * 3            0 stands for v2 support
+ * 2:0          001:1X, 010:2X, 100:4X
+ * ----------------------------------------------
+ */
+ rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
+ & (mode & AGPSTAT_RATE_MASK);
+ if (rate & AGP2_RATE_4X)
+ rate = AGP2_RATE_4X;
+ else if (rate & AGP2_RATE_2X)
+ rate = AGP2_RATE_2X;
+ else
+ rate = AGP2_RATE_1X;
+
+ cmd = rq | sba | over4g | fw | rate;
+ /* enable agp mode */
+ cmd |= AGPCMD_AGPEN;
+
+ return (cmd);
+}
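+/*
+ * Example (an editorial sketch): if the target advertises rates
+ * 1X|2X|4X (0x7), the master advertises 1X|2X (0x3) and the mode
+ * requests 0x7, the three-way AND gives 0x3; the AGP2_RATE_4X test
+ * fails, so AGP2_RATE_2X is chosen and the returned command word is
+ * rq | sba | over4g | fw | AGP2_RATE_2X | AGPCMD_AGPEN.
+ */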
+
+static uint32_t
+agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
+{
+ uint32_t cmd = 0;
+ uint32_t rq, arqsz, cal, sba, over4g, fw, rate;
+
+ /*
+ * tstatus: target device status
+ * mstatus: master device status
+ * mode: the agp mode to be set
+ */
+
+ /*
+ * RQ - Request Queue size
+ * Set RQ to the min of mode and tstatus
+ * If mode requests an RQ larger than the hardware can support,
+ * use the max RQ the hardware can support;
+ * tstatus & AGPSTAT_RQ_MASK is that maximum.
+ * The corelogic will enqueue agp transactions.
+ */
+ rq = mode & AGPSTAT_RQ_MASK;
+ if ((tstatus & AGPSTAT_RQ_MASK) < rq)
+ rq = tstatus & AGPSTAT_RQ_MASK;
+
+ /*
+ * ARQSZ - Asynchronous Request Queue size
+ * Set the value equal to tstatus.
+ * Don't allow the mode register to override values
+ */
+ arqsz = tstatus & AGPSTAT_ARQSZ_MASK;
+
+ /*
+ * CAL - Calibration cycle
+ * Set to the min of tstatus and mstatus
+ * Don't allow override by mode register
+ */
+ cal = tstatus & AGPSTAT_CAL_MASK;
+ if ((mstatus & AGPSTAT_CAL_MASK) < cal)
+ cal = mstatus & AGPSTAT_CAL_MASK;
+
+ /*
+ * SBA - Sideband Addressing
+ *
+ * Sideband Addressing provides an additional bus to pass requests
+ * (address and command) to the target from the master.
+ *
+ * SBA in agp v3.0 must be set
+ */
+ sba = AGPCMD_SBAEN;
+
+ /* GART64B is not set since no hardware supports it now */
+
+ /* Set OVER4G if all three support it */
+ over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
+ & (mode & AGPSTAT_OVER4G);
+
+ /*
+ * FW - fast write
+ *
+ * Acceleration of memory write transactions from the corelogic to the
+ * A.G.P. master device acting like a PCI target.
+ *
+ * Always set FW in AGP 3.0
+ */
+ fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
+ & (mode & AGPSTAT_FW);
+
+ /*
+ * Figure out the max rate
+ *
+ * AGP v3 supports 8X and 4X speeds.
+ *
+ * status bits  meaning
+ * ---------------------------------------------
+ * 31:4         others
+ * 3            1 stands for v3 support
+ * 2:0          001:4X, 010:8X, 011:4X and 8X
+ * ----------------------------------------------
+ */
+ rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
+ & (mode & AGPSTAT_RATE_MASK);
+ if (rate & AGP3_RATE_8X)
+ rate = AGP3_RATE_8X;
+ else
+ rate = AGP3_RATE_4X;
+
+ cmd = rq | arqsz | cal | sba | over4g | fw | rate;
+ /* Enable AGP mode */
+ cmd |= AGPCMD_AGPEN;
+
+ return (cmd);
+}
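+/*
+ * Example (an editorial sketch): a target advertising 4X and 8X
+ * (0b011) with a master advertising only 4X (0b001) yields
+ * rate = 0b001 after the three-way AND; AGP3_RATE_8X is not set,
+ * so AGP3_RATE_4X is used.
+ */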
+
+static int
+agp_setup(agpgart_softstate_t *softstate, uint32_t mode)
+{
+ uint32_t tstatus, mstatus;
+ uint32_t agp_mode;
+
+ tstatus = softstate->asoft_info.agpki_tstatus;
+ mstatus = softstate->asoft_info.agpki_mstatus;
+
+ /*
+ * There are three kinds of AGP mode: 1.0, 2.0 and 3.0.
+ * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we
+ * only check for 2.0 and 3.0 modes. An AGP 3.0 device can work
+ * in either AGP 2.0 or AGP 3.0 mode. By checking the AGP status
+ * register we can tell which mode it is working in. The working
+ * modes of the AGP master and AGP target must be consistent;
+ * that is, both must work in AGP 3.0 mode or both in AGP 2.0 mode.
+ */
+ if ((softstate->asoft_info.agpki_tver.agpv_major == 3) &&
+ (tstatus & AGPSTAT_MODE3)) {
+ /* Master device should be 3.0 mode, too */
+ if ((softstate->asoft_info.agpki_mver.agpv_major != 3) ||
+ ((mstatus & AGPSTAT_MODE3) == 0))
+ return (EIO);
+
+ agp_mode = agp_v3_setup(tstatus, mstatus, mode);
+ /* Write to the AGPCMD register of target and master devices */
+ if (lyr_set_agp_cmd(agp_mode,
+ &softstate->asoft_devreg))
+ return (EIO);
+
+ softstate->asoft_mode = agp_mode;
+
+ return (0);
+ }
+
+ /*
+ * If the agp target device doesn't work in AGP 3.0 mode,
+ * it must work in AGP 2.0 mode; make sure the
+ * master device works in AGP 2.0 mode too.
+ */
+ if ((softstate->asoft_info.agpki_mver.agpv_major == 3) &&
+ (mstatus & AGPSTAT_MODE3))
+ return (EIO);
+
+ agp_mode = agp_v2_setup(tstatus, mstatus, mode);
+ if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg))
+ return (EIO);
+ softstate->asoft_mode = agp_mode;
+
+ return (0);
+}
+
+/*
+ * agp_alloc_kmem()
+ *
+ * Description:
+ * This function allocates physical memory for userland applications
+ * by ddi interfaces. This function can also be called to allocate
+ * small physically contiguous pages, usually tens of kilobytes.
+ *
+ * Arguments:
+ * softstate driver soft state pointer
+ * length memory size
+ *
+ * Returns:
+ * entryp new keytable entity pointer
+ * NULL no keytable slot available or no physical
+ * memory available
+ */
+static keytable_ent_t *
+agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length, int type)
+{
+ keytable_ent_t keyentry;
+ keytable_ent_t *entryp;
+ int ret;
+
+ ASSERT(AGP_ALIGNED(length));
+
+ bzero(&keyentry, sizeof (keytable_ent_t));
+
+ keyentry.kte_pages = AGP_BYTES2PAGES(length);
+ keyentry.kte_type = type;
+
+ /*
+ * Set dma_attr_sgllen to assure contiguous physical pages
+ */
+ if (type == AGP_PHYSICAL)
+ agpgart_dma_attr.dma_attr_sgllen = 1;
+ else
+ agpgart_dma_attr.dma_attr_sgllen = (int)keyentry.kte_pages;
+
+ /* 4k size pages */
+ keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP);
+
+ if (ddi_dma_alloc_handle(softstate->asoft_dip,
+ &agpgart_dma_attr,
+ DDI_DMA_SLEEP, NULL,
+ &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_alloc_kmem: ddi_dma_allco_hanlde error"));
+ goto err4;
+ }
+
+ if ((ret = ddi_dma_mem_alloc(
+ KMEMP(keyentry.kte_memhdl)->kmem_handle,
+ length,
+ &gart_dev_acc_attr,
+ DDI_DMA_CONSISTENT,
+ DDI_DMA_SLEEP, NULL,
+ &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
+ &KMEMP(keyentry.kte_memhdl)->kmem_reallen,
+ &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_alloc_kmem: ddi_dma_mem_alloc error"));
+
+ goto err3;
+ }
+
+ ret = ddi_dma_addr_bind_handle(
+ KMEMP(keyentry.kte_memhdl)->kmem_handle,
+ NULL,
+ KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
+ length,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ DDI_DMA_SLEEP,
+ NULL,
+ &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
+ &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num);
+
+ /*
+ * Even when dma_attr_sgllen = 1, ddi_dma_addr_bind_handle may
+ * return more than one cookie; we check for this in the if
+ * statement below.
+ */
+
+ if ((ret != DDI_DMA_MAPPED) ||
+ ((agpgart_dma_attr.dma_attr_sgllen == 1) &&
+ (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1))) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_alloc_kmem: can not alloc physical memory properly"));
+ goto err2;
+ }
+
+ keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
+ keyentry.kte_pages, KM_SLEEP);
+
+ if (kmem_getpfns(
+ KMEMP(keyentry.kte_memhdl)->kmem_handle,
+ &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
+ KMEMP(keyentry.kte_memhdl)->kmem_cookies_num,
+ keyentry.kte_pfnarray)) {
+ AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error"));
+ goto err1;
+ }
+
+ ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
+ keyentry.kte_pfnarray, keyentry.kte_pages));
+ if (agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
+ keyentry.kte_pfnarray, keyentry.kte_pages))
+ goto err1;
+ entryp = agp_fill_empty_keyent(softstate, &keyentry);
+ if (!entryp) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_alloc_kmem: agp_fill_empty_keyent error"));
+
+ goto err1;
+ }
+ ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
+
+ return (entryp);
+
+err1:
+ kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
+ keyentry.kte_pfnarray = NULL;
+ (void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle);
+ KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0;
+err2:
+ ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl);
+ KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL;
+ KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0;
+ KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL;
+err3:
+ ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle));
+ KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL;
+err4:
+ kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t));
+ keyentry.kte_memhdl = NULL;
+ return (NULL);
+
+}
+
+/*
+ * agp_alloc_mem()
+ *
+ * Description:
+ * This function allocates physical memory for userland applications.
+ * In order to save kernel virtual space, we use the direct mapping
+ * memory interface if it is available.
+ *
+ * Arguments:
+ * st driver soft state pointer
+ * length memory size
+ * type AGP_NORMAL: normal agp memory; AGP_PHYSICAL: special
+ * memory type for the intel i810 IGD
+ *
+ * Returns:
+ * NULL Invalid memory type or can not allocate memory
+ * Keytable entry pointer returned by agp_alloc_kmem
+ */
+static keytable_ent_t *
+agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type)
+{
+
+ /*
+ * The AGP_PHYSICAL type requires contiguous physical pages
+ * exported to X drivers, e.g. for the i810 HW cursor or ARGB
+ * cursor. The amount needed is usually small and contiguous,
+ * 4K or 16K, so we use the DDI interfaces to allocate such
+ * memory, and X uses the xsvc driver to map this memory into
+ * its own address space.
+ */
+ ASSERT(st);
+
+ switch (type) {
+ case AGP_NORMAL:
+ case AGP_PHYSICAL:
+ return (agp_alloc_kmem(st, length, type));
+ default:
+ return (NULL);
+ }
+}
+
+/*
+ * free_gart_table()
+ *
+ * Description:
+ * This function frees the gart table memory allocated by driver.
+ * Must disable gart table before calling this function.
+ *
+ * Arguments:
+ * softstate driver soft state pointer
+ *
+ */
+static void
+free_gart_table(agpgart_softstate_t *st)
+{
+
+ if (st->gart_dma_handle == NULL)
+ return;
+
+ (void) ddi_dma_unbind_handle(st->gart_dma_handle);
+ ddi_dma_mem_free(&st->gart_dma_acc_handle);
+ st->gart_dma_acc_handle = NULL;
+ ddi_dma_free_handle(&st->gart_dma_handle);
+ st->gart_dma_handle = NULL;
+ st->gart_vbase = 0;
+ st->gart_size = 0;
+}
+
+/*
+ * alloc_gart_table()
+ *
+ * Description:
+ * This function allocates one physically contiguous gart table.
+ * Intel integrated video devices other than i810 have their own
+ * special video BIOS; no gart table needs to be allocated for them.
+ *
+ * Arguments:
+ * st driver soft state pointer
+ *
+ * Returns:
+ * 0 success
+ * -1 can not allocate gart table
+ */
+static int
+alloc_gart_table(agpgart_softstate_t *st)
+{
+ int num_pages;
+ size_t table_size;
+ int ret = DDI_SUCCESS;
+ ddi_dma_cookie_t cookie;
+ uint32_t num_cookies;
+
+ num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize);
+
+ /*
+ * Only 40-bit maximum physical memory is supported by today's
+ * AGP hardware (32-bit gart tables can hold 40-bit memory addresses).
+ * No one supports 64-bit gart entries now, so the size of gart
+ * entries defaults to 32-bit though AGP3.0 specifies the possibility
+ * of 64-bit gart entries.
+ */
+
+ table_size = num_pages * (sizeof (uint32_t));
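+ /*
+ * For example (an editorial sketch, assuming 4KB agp pages): a
+ * 128MB aperture maps to AGP_MB2PAGES(128) == 32768 entries of
+ * 4 bytes each, i.e. a 128KB gart table covering the whole
+ * aperture.
+ */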
+
+ /*
+ * Only AMD64 can put gart table above 4G, 40 bits at maximum
+ */
+ if (st->asoft_devreg.agprd_arctype == ARC_AMD64AGP)
+ garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL;
+ else
+ garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU;
+ /* Allocate physical continuous page frame for gart table */
+ if (ret = ddi_dma_alloc_handle(st->asoft_dip,
+ &garttable_dma_attr,
+ DDI_DMA_SLEEP,
+ NULL, &st->gart_dma_handle)) {
+ AGPDB_PRINT2((CE_WARN,
+ "alloc_gart_table: ddi_dma_alloc_handle failed"));
+ goto err3;
+ }
+
+ if (ret = ddi_dma_mem_alloc(st->gart_dma_handle,
+ table_size,
+ &gart_dev_acc_attr,
+ DDI_DMA_CONSISTENT,
+ DDI_DMA_SLEEP, NULL,
+ &st->gart_vbase,
+ &st->gart_size,
+ &st->gart_dma_acc_handle)) {
+ AGPDB_PRINT2((CE_WARN,
+ "alloc_gart_table: ddi_dma_mem_alloc failed"));
+ goto err2;
+
+ }
+
+ ret = ddi_dma_addr_bind_handle(st->gart_dma_handle,
+ NULL, st->gart_vbase,
+ table_size,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ DDI_DMA_SLEEP, NULL,
+ &cookie, &num_cookies);
+
+ st->gart_pbase = cookie.dmac_address;
+
+ if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) {
+ if (num_cookies > 1)
+ (void) ddi_dma_unbind_handle(st->gart_dma_handle);
+ AGPDB_PRINT2((CE_WARN,
+ "alloc_gart_table: alloc contiguous phys memory failed"));
+ goto err1;
+ }
+
+ return (0);
+err1:
+ ddi_dma_mem_free(&st->gart_dma_acc_handle);
+ st->gart_dma_acc_handle = NULL;
+err2:
+ ddi_dma_free_handle(&st->gart_dma_handle);
+ st->gart_dma_handle = NULL;
+err3:
+ st->gart_pbase = 0;
+ st->gart_size = 0;
+ st->gart_vbase = 0;
+
+ return (-1);
+}
+
+/*
+ * agp_add_to_gart()
+ *
+ * Description:
+ * This function fills the gart table entries from a given page frame
+ * number array and sets up the agp aperture page to physical memory
+ * page translation.
+ * Arguments:
+ * type valid system arc types: ARC_INTELAGP,
+ * ARC_AMD64AGP
+ * pfnarray allocated physical page frame number array
+ * pg_offset agp aperture start page to be bound
+ * entries the number of pages to be bound
+ * dma_hdl gart table dma memory handle
+ * tablep gart table kernel virtual address
+ * Returns:
+ * -1 failed
+ * 0 success
+ */
+static int
+agp_add_to_gart(
+ agp_arc_type_t type,
+ pfn_t *pfnarray,
+ uint32_t pg_offset,
+ uint32_t entries,
+ ddi_dma_handle_t dma_hdl,
+ uint32_t *tablep)
+{
+ int items = 0;
+ uint32_t *entryp;
+ uint32_t itemv;
+
+ entryp = tablep + pg_offset;
+ while (items < entries) {
+ if (pfn2gartentry(type, pfnarray[items], &itemv))
+ break;
+ *(entryp + items) = itemv;
+ items++;
+ }
+ if (items < entries)
+ return (-1);
+
+ (void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
+ entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
+
+ return (0);
+}
+
+/*
+ * agp_bind_key()
+ *
+ * Description:
+ * This function will call low level gart table access functions to
+ * set up gart table translation. Also it will do some sanity
+ * checking on the key table entry.
+ *
+ * Arguments:
+ * softstate driver soft state pointer
+ * keyent key table entity pointer to be bound
+ * pg_offset aperture start page to be bound
+ * Returns:
+ * EINVAL not a valid operation
+ */
+static int
+agp_bind_key(agpgart_softstate_t *softstate,
+ keytable_ent_t *keyent, uint32_t pg_offset)
+{
+ uint64_t pg_end;
+ int ret = 0;
+
+ ASSERT(keyent);
+ ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS));
+ ASSERT(mutex_owned(&softstate->asoft_instmutex));
+
+ pg_end = pg_offset + keyent->kte_pages;
+
+ if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_bind_key: key=0x%x,exceed aper range",
+ keyent->kte_key));
+
+ return (EINVAL);
+ }
+
+ if (agp_check_off(softstate->asoft_table,
+ pg_offset, keyent->kte_pages)) {
+ AGPDB_PRINT2((CE_WARN,
+ "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlaped",
+ pg_offset, keyent->kte_pages));
+ return (EINVAL);
+ }
+
+ ASSERT(keyent->kte_pfnarray != NULL);
+
+ switch (softstate->asoft_devreg.agprd_arctype) {
+ case ARC_IGD810:
+ case ARC_IGD830:
+ ret = lyr_i8xx_add_to_gtt(pg_offset, keyent,
+ &softstate->asoft_devreg);
+ if (ret)
+ return (EIO);
+ break;
+ case ARC_INTELAGP:
+ case ARC_AMD64AGP:
+ ret = agp_add_to_gart(
+ softstate->asoft_devreg.agprd_arctype,
+ keyent->kte_pfnarray,
+ pg_offset,
+ keyent->kte_pages,
+ softstate->gart_dma_handle,
+ (uint32_t *)softstate->gart_vbase);
+ if (ret)
+ return (EINVAL);
+ /* Flush GTLB table */
+ lyr_flush_gart_cache(&softstate->asoft_devreg);
+ break;
+ default:
+ AGPDB_PRINT2((CE_WARN,
+ "agp_bind_key: arc type = 0x%x unsupported",
+ softstate->asoft_devreg.agprd_arctype));
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static int
+agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int instance;
+ agpgart_softstate_t *softstate;
+
+ if (cmd != DDI_ATTACH) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_attach: only attach op supported"));
+ return (DDI_FAILURE);
+ }
+ instance = ddi_get_instance(dip);
+
+ if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance)
+ != DDI_SUCCESS) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_attach: soft state zalloc failed"));
+ goto err1;
+
+ }
+ softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+ mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL);
+ softstate->asoft_dip = dip;
+ /*
+ * Allocate LDI identifier for agpgart driver
+ * Agpgart driver is the kernel consumer
+ */
+ if (ldi_ident_from_dip(dip, &softstate->asoft_li)) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_attach: LDI indentifier allcation failed"));
+ goto err2;
+ }
+
+ softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN;
+ /* Install agp kstat */
+ if (agp_init_kstats(softstate)) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error"));
+ goto err3;
+ }
+ /*
+ * devfs will create /dev/agpgart
+ * and /devices/agpgart:agpgart
+ */
+
+ if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR,
+ AGP_INST2MINOR(instance),
+ DDI_NT_AGP_PSEUDO, 0)) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_attach: Can not create minor node"));
+ goto err4;
+ }
+
+ softstate->asoft_table = kmem_zalloc(
+ AGP_MAXKEYS * (sizeof (keytable_ent_t)),
+ KM_SLEEP);
+
+ list_head_init(&softstate->mapped_list);
+
+ return (DDI_SUCCESS);
+err4:
+ agp_fini_kstats(softstate);
+err3:
+ ldi_ident_release(softstate->asoft_li);
+err2:
+ ddi_soft_state_free(agpgart_glob_soft_handle, instance);
+err1:
+ return (DDI_FAILURE);
+}
+
+static int
+agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance;
+ agpgart_softstate_t *st;
+
+ instance = ddi_get_instance(dip);
+
+ st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ /*
+ * Callers should explicitly free all the memory they allocated.
+ * Here we release any caller-allocated memory that was never
+ * properly freed. Taking the mutex here makes the mutex-held
+ * assertion in agp_dealloc_mem succeed.
+ */
+ mutex_enter(&st->asoft_instmutex);
+ if (agp_del_allkeys(st)) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err"));
+ AGPDB_PRINT2((CE_WARN,
+ "you might free agp memory exported to your applications"));
+
+ mutex_exit(&st->asoft_instmutex);
+ return (DDI_FAILURE);
+ }
+ mutex_exit(&st->asoft_instmutex);
+ if (st->asoft_table) {
+ kmem_free(st->asoft_table,
+ AGP_MAXKEYS * (sizeof (keytable_ent_t)));
+ st->asoft_table = 0;
+ }
+
+ struct list_head *entry, *temp, *head;
+ igd_gtt_seg_t *gttseg;
+ list_head_for_each_safe(entry, temp, &st->mapped_list) {
+ gttseg = entry->gttseg;
+ list_head_del(entry);
+ kmem_free(entry, sizeof (*entry));
+ kmem_free(gttseg->igs_phyaddr,
+ sizeof (uint32_t) * gttseg->igs_npage);
+ kmem_free(gttseg, sizeof (igd_gtt_seg_t));
+ }
+ head = &st->mapped_list;
+ kmem_free(head->next,
+ AGP_HASH_NODE * sizeof (struct list_head));
+ head->next = NULL;
+
+ ddi_remove_minor_node(dip, AGPGART_DEVNODE);
+ agp_fini_kstats(st);
+ ldi_ident_release(st->asoft_li);
+ mutex_destroy(&st->asoft_instmutex);
+ ddi_soft_state_free(agpgart_glob_soft_handle, instance);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
+ void **resultp)
+{
+ agpgart_softstate_t *st;
+ int instance, rval = DDI_FAILURE;
+ dev_t dev;
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ dev = (dev_t)arg;
+ instance = AGP_DEV2INST(dev);
+ st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+ if (st != NULL) {
+ mutex_enter(&st->asoft_instmutex);
+ *resultp = st->asoft_dip;
+ mutex_exit(&st->asoft_instmutex);
+ rval = DDI_SUCCESS;
+ } else
+ *resultp = NULL;
+
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ dev = (dev_t)arg;
+ instance = AGP_DEV2INST(dev);
+ *resultp = (void *)(uintptr_t)instance;
+ rval = DDI_SUCCESS;
+
+ break;
+ default:
+ break;
+ }
+
+ return (rval);
+}
+
+/*
+ * agpgart_open()
+ *
+ * Description:
+ * This function is the driver open entry point. If it is the
+ * first time the agpgart driver is opened, the driver will
+ * open other agp related layered drivers and set up the agpgart
+ * table properly.
+ *
+ * Arguments:
+ * dev device number pointer
+ * openflags open flags
+ * otyp OTYP_BLK, OTYP_CHR
+ * credp user's credential's struct pointer
+ *
+ * Returns:
+ * ENXIO operation error
+ * EAGAIN resource temporarily unavailable
+ * 0 success
+ */
+/*ARGSUSED*/
+static int
+agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
+{
+ int instance = AGP_DEV2INST(*dev);
+ agpgart_softstate_t *softstate;
+ int rc = 0;
+ uint32_t devid;
+
+ if (secpolicy_gart_access(credp)) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_open: permission denied"));
+ return (EPERM);
+ }
+ softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+ if (softstate == NULL) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err"));
+ return (ENXIO);
+ }
+
+ mutex_enter(&softstate->asoft_instmutex);
+
+ if (softstate->asoft_opened) {
+ softstate->asoft_opened++;
+ mutex_exit(&softstate->asoft_instmutex);
+ return (0);
+ }
+
+ /*
+ * The driver is being opened for the first time, so we initialize
+ * the layered driver interface and softstate members here.
+ */
+ softstate->asoft_pgused = 0;
+ if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed"));
+ mutex_exit(&softstate->asoft_instmutex);
+ return (EAGAIN);
+ }
+
+ /* Call into layered driver */
+ if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error"));
+ lyr_end(&softstate->asoft_devreg);
+ mutex_exit(&softstate->asoft_instmutex);
+ return (EIO);
+ }
+
+ /*
+ * BIOS already set up gtt table for ARC_IGD830
+ */
+ if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
+ softstate->asoft_opened++;
+
+ softstate->asoft_pgtotal =
+ get_max_pages(softstate->asoft_info.agpki_apersize);
+
+ if (lyr_config_devices(&softstate->asoft_devreg)) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_open: lyr_config_devices error"));
+ lyr_end(&softstate->asoft_devreg);
+ mutex_exit(&softstate->asoft_instmutex);
+
+ return (EIO);
+ }
+ devid = softstate->asoft_info.agpki_mdevid;
+ if (IS_INTEL_915(devid) ||
+ IS_INTEL_965(devid) ||
+ IS_INTEL_X33(devid) ||
+ IS_INTEL_G4X(devid)) {
+ rc = ldi_ioctl(softstate->asoft_devreg.agprd_targethdl,
+ INTEL_CHIPSET_FLUSH_SETUP, 0, FKIOCTL, kcred, 0);
+ }
+ if (rc) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_open: Intel chipset flush setup error"));
+ lyr_end(&softstate->asoft_devreg);
+ mutex_exit(&softstate->asoft_instmutex);
+ return (EIO);
+ }
+ mutex_exit(&softstate->asoft_instmutex);
+ return (0);
+ }
+
+ rc = alloc_gart_table(softstate);
+
+ /*
+ * Allocate physically contiguous pages for the AGP arc or
+ * i810 arc. If that failed, halve aper_size to reduce the
+ * gart table size, down to a 4 megabyte aperture. This
+ * is just a workaround for systems with very little
+ * physically contiguous memory.
+ */
+ if (rc) {
+ while ((softstate->asoft_info.agpki_apersize >= 4) &&
+ (alloc_gart_table(softstate))) {
+ softstate->asoft_info.agpki_apersize >>= 1;
+ }
+ if (softstate->asoft_info.agpki_apersize >= 4)
+ rc = 0;
+ }
+
+ if (rc != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_open: alloc gart table failed"));
+ lyr_end(&softstate->asoft_devreg);
+ mutex_exit(&softstate->asoft_instmutex);
+ return (EAGAIN);
+ }
+
+ softstate->asoft_pgtotal =
+ get_max_pages(softstate->asoft_info.agpki_apersize);
+ /*
+ * The BIOS doesn't initialize the GTT for i810,
+ * so the i810 GTT must be created by the driver.
+ *
+ * Set up gart table and enable it.
+ */
+ if (lyr_set_gart_addr(softstate->gart_pbase,
+ &softstate->asoft_devreg)) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_open: set gart table addr failed"));
+ free_gart_table(softstate);
+ lyr_end(&softstate->asoft_devreg);
+ mutex_exit(&softstate->asoft_instmutex);
+ return (EIO);
+ }
+ if (lyr_config_devices(&softstate->asoft_devreg)) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_open: lyr_config_devices failed"));
+ free_gart_table(softstate);
+ lyr_end(&softstate->asoft_devreg);
+ mutex_exit(&softstate->asoft_instmutex);
+ return (EIO);
+ }
+
+ softstate->asoft_opened++;
+ mutex_exit(&softstate->asoft_instmutex);
+
+ return (0);
+}
+
+/*
+ * agpgart_close()
+ *
+ * Description:
+ * agpgart_close will release resources allocated in the first open
+ * and close other open layered drivers. Also it frees the memory
+ * allocated by ioctls.
+ *
+ * Arguments:
+ * dev device number
+ * flag file status flag
+ * otyp OTYP_BLK, OTYP_CHR
+ * credp user's credential's struct pointer
+ *
+ * Returns:
+ * ENXIO not an error, to support "deferred attach"
+ * 0 success
+ */
+/*ARGSUSED*/
+static int
+agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp)
+{
+ int instance = AGP_DEV2INST(dev);
+ agpgart_softstate_t *softstate;
+ int rc = 0;
+ uint32_t devid;
+
+ softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+ if (softstate == NULL) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err"));
+ return (ENXIO);
+ }
+
+ mutex_enter(&softstate->asoft_instmutex);
+ ASSERT(softstate->asoft_opened);
+
+
+ /*
+ * If the controlling process failed to release control over
+ * the agpgart driver before closing it, release the control
+ * here on the last close.
+ */
+ if (softstate->asoft_acquired == 1) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_close: auto release control over driver"));
+ release_control(softstate);
+ }
+
+ devid = softstate->asoft_info.agpki_mdevid;
+ if (IS_INTEL_915(devid) ||
+ IS_INTEL_965(devid) ||
+ IS_INTEL_X33(devid) ||
+ IS_INTEL_G4X(devid)) {
+ rc = ldi_ioctl(softstate->asoft_devreg.agprd_targethdl,
+ INTEL_CHIPSET_FLUSH_FREE, 0, FKIOCTL, kcred, 0);
+ }
+ if (rc) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_open: Intel chipset flush free error"));
+ }
+
+ if (lyr_unconfig_devices(&softstate->asoft_devreg)) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_close: lyr_unconfig_device error"));
+ mutex_exit(&softstate->asoft_instmutex);
+ return (EIO);
+ }
+ softstate->asoft_agpen = 0;
+
+ if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
+ free_gart_table(softstate);
+ }
+
+ lyr_end(&softstate->asoft_devreg);
+
+ /*
+ * This statement must come before agp_del_allkeys:
+ * agp_dealloc_mem, indirectly called by agp_del_allkeys,
+ * tests this variable.
+ */
+ softstate->asoft_opened = 0;
+
+ /*
+ * Free the memory allocated by user applications which
+ * was never deallocated.
+ */
+ (void) agp_del_allkeys(softstate);
+
+ mutex_exit(&softstate->asoft_instmutex);
+
+ return (0);
+}
+
+static int
+ioctl_agpgart_info(agpgart_softstate_t *softstate, void *arg, int flags)
+{
+ agp_info_t infostruct;
+#ifdef _MULTI_DATAMODEL
+ agp_info32_t infostruct32;
+#endif
+
+ bzero(&infostruct, sizeof (agp_info_t));
+
+#ifdef _MULTI_DATAMODEL
+ bzero(&infostruct32, sizeof (agp_info32_t));
+ if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
+ if (copyinfo(softstate, &infostruct))
+ return (EINVAL);
+
+ agpinfo_default_to_32(infostruct, infostruct32);
+ if (ddi_copyout(&infostruct32, arg,
+ sizeof (agp_info32_t), flags) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+#endif /* _MULTI_DATAMODEL */
+ if (copyinfo(softstate, &infostruct))
+ return (EINVAL);
+
+ if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) {
+ return (EFAULT);
+ }
+
+ return (0);
+}
+
+static int
+ioctl_agpgart_acquire(agpgart_softstate_t *st)
+{
+ if (st->asoft_acquired) {
+ AGPDB_PRINT2((CE_WARN, "ioctl_acquire: already acquired"));
+ return (EBUSY);
+ }
+ acquire_control(st);
+ return (0);
+}
+
+static int
+ioctl_agpgart_release(agpgart_softstate_t *st)
+{
+ if (is_controlling_proc(st) < 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_release: not a controlling process"));
+ return (EPERM);
+ }
+ release_control(st);
+ return (0);
+}
+
+static int
+ioctl_agpgart_setup(agpgart_softstate_t *st, void *arg, int flags)
+{
+ agp_setup_t data;
+ int rc = 0;
+
+ if (is_controlling_proc(st) < 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_setup: not a controlling process"));
+ return (EPERM);
+ }
+
+ if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_setup: no true agp bridge"));
+ return (EINVAL);
+ }
+
+ if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0)
+ return (EFAULT);
+
+ if (rc = agp_setup(st, data.agps_mode))
+ return (rc);
+ /* Store agp mode status for kstat */
+ st->asoft_agpen = 1;
+ return (0);
+}
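+/*
+ * Typical userland flow through the ioctls above and below (an
+ * editorial sketch, not from the original source; it assumes the
+ * AGPIOC_* command names and structures from <sys/agpgart.h>):
+ *
+ *	int fd = open("/dev/agpgart", O_RDWR);
+ *	agp_info_t info;
+ *	ioctl(fd, AGPIOC_INFO, &info);
+ *	ioctl(fd, AGPIOC_ACQUIRE);		become controlling process
+ *	agp_setup_t setup = { info.agpi_mode };
+ *	ioctl(fd, AGPIOC_SETUP, &setup);	negotiate the AGP command
+ *
+ * Allocation and binding then go through AGPIOC_ALLOCATE, AGPIOC_BIND,
+ * AGPIOC_UNBIND and AGPIOC_DEALLOCATE, and AGPIOC_RELEASE gives up
+ * control.
+ */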
+
+static int
+ioctl_agpgart_alloc(agpgart_softstate_t *st, void *arg, int flags)
+{
+ agp_allocate_t alloc_info;
+ keytable_ent_t *entryp;
+ size_t length;
+ uint64_t pg_num;
+
+ if (is_controlling_proc(st) < 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_alloc: not a controlling process"));
+ return (EPERM);
+ }
+
+ if (ddi_copyin(arg, &alloc_info,
+ sizeof (agp_allocate_t), flags) != 0) {
+ return (EFAULT);
+ }
+ pg_num = st->asoft_pgused + alloc_info.agpa_pgcount;
+ if (pg_num > st->asoft_pgtotal) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_alloc: exceeding the memory pages limit"));
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_alloc: request %x pages failed",
+ alloc_info.agpa_pgcount));
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_alloc: pages used %x total is %x",
+ st->asoft_pgused, st->asoft_pgtotal));
+
+ return (EINVAL);
+ }
+
+ length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount);
+ entryp = agp_alloc_mem(st, length, alloc_info.agpa_type);
+ if (!entryp) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_alloc: allocate 0x%lx bytes failed",
+ length));
+ return (ENOMEM);
+ }
+ ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
+ alloc_info.agpa_key = entryp->kte_key;
+ if (alloc_info.agpa_type == AGP_PHYSICAL) {
+ alloc_info.agpa_physical =
+ (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT);
+ }
+	/* Update the memory pages used */
+ st->asoft_pgused += alloc_info.agpa_pgcount;
+
+	if (ddi_copyout(&alloc_info, arg,
+	    sizeof (agp_allocate_t), flags) != 0) {
+		return (EFAULT);
+	}
+
+ return (0);
+}
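+
+/*
+ * Example (sketch): allocating 16 pages of normal GART memory; the key
+ * returned in agpa_key names this allocation in later bind, unbind and
+ * deallocate calls:
+ *
+ *	agp_allocate_t alloc;
+ *	int key = -1;
+ *
+ *	alloc.agpa_pgcount = 16;
+ *	alloc.agpa_type = AGP_NORMAL;
+ *	if (ioctl(fd, AGPIOC_ALLOCATE, &alloc) == 0)
+ *		key = alloc.agpa_key;
+ */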
+
+static int
+ioctl_agpgart_dealloc(agpgart_softstate_t *st, intptr_t arg)
+{
+ int key;
+ keytable_ent_t *keyent;
+
+ if (is_controlling_proc(st) < 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_dealloc: not a controlling process"));
+ return (EPERM);
+ }
+ key = (int)arg;
+ if ((key >= AGP_MAXKEYS) || key < 0) {
+ return (EINVAL);
+ }
+ keyent = &st->asoft_table[key];
+ if (!keyent->kte_memhdl) {
+ return (EINVAL);
+ }
+
+ if (agp_dealloc_mem(st, keyent))
+ return (EINVAL);
+
+ /* Update the memory pages used */
+ st->asoft_pgused -= keyent->kte_pages;
+ bzero(keyent, sizeof (keytable_ent_t));
+
+ return (0);
+}
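+
+/*
+ * Example (sketch): note that, unlike the other ioctls, AGPIOC_DEALLOCATE
+ * takes the key by value rather than through a pointer:
+ *
+ *	(void) ioctl(fd, AGPIOC_DEALLOCATE, key);
+ */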
+
+static int
+ioctl_agpgart_bind(agpgart_softstate_t *st, void *arg, int flags)
+{
+ agp_bind_t bind_info;
+ keytable_ent_t *keyent;
+ int key;
+ uint32_t pg_offset;
+ int retval = 0;
+
+ if (is_controlling_proc(st) < 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_bind: not a controlling process"));
+ return (EPERM);
+ }
+
+ if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) {
+ return (EFAULT);
+ }
+
+ key = bind_info.agpb_key;
+ if ((key >= AGP_MAXKEYS) || key < 0) {
+ AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key"));
+ return (EINVAL);
+ }
+
+ if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) {
+ if (AGP_PAGES2KB(bind_info.agpb_pgstart) <
+ st->asoft_info.agpki_presize) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_bind: bind to prealloc area "
+ "pgstart = %dKB < presize = %ldKB",
+ AGP_PAGES2KB(bind_info.agpb_pgstart),
+ st->asoft_info.agpki_presize));
+ return (EINVAL);
+ }
+ }
+
+ pg_offset = bind_info.agpb_pgstart;
+ keyent = &st->asoft_table[key];
+ if (!keyent->kte_memhdl) {
+ AGPDB_PRINT2((CE_WARN,
+		    "ioctl_agpgart_bind: Key = 0x%x can't get keyent",
+ key));
+ return (EINVAL);
+ }
+
+ if (keyent->kte_bound != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "ioctl_agpgart_bind: Key = 0x%x already bound",
+ key));
+ return (EINVAL);
+ }
+ retval = agp_bind_key(st, keyent, pg_offset);
+
+ if (retval == 0) {
+ keyent->kte_pgoff = pg_offset;
+ keyent->kte_bound = 1;
+ }
+
+ return (retval);
+}
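+
+/*
+ * Example (sketch): binding the allocation above at aperture page 0x100;
+ * agpb_pgstart is an offset in aperture pages, not bytes:
+ *
+ *	agp_bind_t bind;
+ *
+ *	bind.agpb_key = key;
+ *	bind.agpb_pgstart = 0x100;
+ *	(void) ioctl(fd, AGPIOC_BIND, &bind);
+ */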
+
+static int
+ioctl_agpgart_unbind(agpgart_softstate_t *st, void *arg, int flags)
+{
+ int key, retval = 0;
+ agp_unbind_t unbindinfo;
+ keytable_ent_t *keyent;
+
+ if (is_controlling_proc(st) < 0) {
+		AGPDB_PRINT2((CE_WARN,
+		    "ioctl_agpgart_unbind: not a controlling process"));
+ return (EPERM);
+ }
+
+ if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) {
+ return (EFAULT);
+ }
+ key = unbindinfo.agpu_key;
+ if ((key >= AGP_MAXKEYS) || key < 0) {
+ AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key"));
+ return (EINVAL);
+ }
+ keyent = &st->asoft_table[key];
+ if (!keyent->kte_bound) {
+ return (EINVAL);
+ }
+
+ if ((retval = agp_unbind_key(st, keyent)) != 0)
+ return (retval);
+
+ return (0);
+}
+
+static int
+ioctl_agpgart_flush_chipset(agpgart_softstate_t *st)
+{
+ ldi_handle_t hdl;
+ uint32_t devid;
+ int rc = 0;
+ devid = st->asoft_info.agpki_mdevid;
+ hdl = st->asoft_devreg.agprd_targethdl;
+ if (IS_INTEL_915(devid) ||
+ IS_INTEL_965(devid) ||
+ IS_INTEL_X33(devid) ||
+ IS_INTEL_G4X(devid)) {
+ rc = ldi_ioctl(hdl, INTEL_CHIPSET_FLUSH, 0, FKIOCTL, kcred, 0);
+ }
+ return (rc);
+}
+
+static int
+ioctl_agpgart_pages_bind(agpgart_softstate_t *st, void *arg, int flags)
+{
+ agp_bind_pages_t bind_info;
+ uint32_t pg_offset;
+ int err = 0;
+ ldi_handle_t hdl;
+ uint32_t npages;
+ igd_gtt_seg_t *gttseg;
+ uint32_t i;
+ int rval;
+ if (ddi_copyin(arg, &bind_info,
+ sizeof (agp_bind_pages_t), flags) != 0) {
+ return (EFAULT);
+ }
+
+ gttseg = (igd_gtt_seg_t *)kmem_zalloc(sizeof (igd_gtt_seg_t),
+ KM_SLEEP);
+
+ pg_offset = bind_info.agpb_pgstart;
+
+ gttseg->igs_pgstart = pg_offset;
+ npages = (uint32_t)bind_info.agpb_pgcount;
+ gttseg->igs_npage = npages;
+
+ gttseg->igs_type = AGP_NORMAL;
+ gttseg->igs_phyaddr = (uint32_t *)kmem_zalloc
+ (sizeof (uint32_t) * gttseg->igs_npage, KM_SLEEP);
+
+ for (i = 0; i < npages; i++) {
+ gttseg->igs_phyaddr[i] = bind_info.agpb_pages[i] <<
+ GTT_PAGE_SHIFT;
+ }
+
+ hdl = st->asoft_devreg.agprd_masterhdl;
+ if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)gttseg, FKIOCTL,
+ kcred, &rval)) {
+		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: start = 0x%x",
+ gttseg->igs_pgstart));
+ AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: pages=0x%x",
+ gttseg->igs_npage));
+ AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: type=0x%x",
+ gttseg->igs_type));
+ err = -1;
+ }
+
+ list_head_add_new(&st->mapped_list, gttseg);
+ return (err);
+}
+
+static int
+ioctl_agpgart_pages_unbind(agpgart_softstate_t *st, void *arg, int flags)
+{
+	agp_unbind_pages_t unbind_info;
+	int rval;
+	ldi_handle_t hdl;
+	igd_gtt_seg_t *gttseg = NULL;
+	struct list_head *entry, *temp;
+
+ if (ddi_copyin(arg, &unbind_info, sizeof (unbind_info), flags) != 0) {
+ return (EFAULT);
+ }
+
+ list_head_for_each_safe(entry, temp, &st->mapped_list) {
+ if (entry->gttseg->igs_pgstart == unbind_info.agpb_pgstart) {
+ gttseg = entry->gttseg;
+			/* on a VT switch, keep the entry for a later rebind */
+ if (unbind_info.agpb_type) {
+ list_head_del(entry);
+ kmem_free(entry, sizeof (*entry));
+ }
+ break;
+ }
+ }
+ ASSERT(gttseg != NULL);
+ gttseg->igs_pgstart = unbind_info.agpb_pgstart;
+ ASSERT(gttseg->igs_npage == unbind_info.agpb_pgcount);
+
+ hdl = st->asoft_devreg.agprd_masterhdl;
+ if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)gttseg, FKIOCTL,
+ kcred, &rval))
+ return (-1);
+
+ if (unbind_info.agpb_type) {
+ kmem_free(gttseg->igs_phyaddr, sizeof (uint32_t) *
+ gttseg->igs_npage);
+ kmem_free(gttseg, sizeof (igd_gtt_seg_t));
+ }
+
+ return (0);
+}
+
+static int
+ioctl_agpgart_pages_rebind(agpgart_softstate_t *st)
+{
+	int rval;
+	ldi_handle_t hdl;
+	igd_gtt_seg_t *gttseg;
+	struct list_head *entry, *temp;
+	int err = 0;
+
+	hdl = st->asoft_devreg.agprd_masterhdl;
+ list_head_for_each_safe(entry, temp, &st->mapped_list) {
+ gttseg = entry->gttseg;
+ list_head_del(entry);
+ kmem_free(entry, sizeof (*entry));
+ if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)gttseg, FKIOCTL,
+ kcred, &rval)) {
+			AGPDB_PRINT2((CE_WARN, "agpgart_pages_rebind error"));
+ err = -1;
+ break;
+ }
+ kmem_free(gttseg->igs_phyaddr, sizeof (uint32_t) *
+ gttseg->igs_npage);
+ kmem_free(gttseg, sizeof (igd_gtt_seg_t));
+	}
+	return (err);
+}
+
+/*ARGSUSED*/
+static int
+agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags,
+ cred_t *credp, int *rvalp)
+{
+ int instance;
+ int retval = 0;
+	void *arg = (void *)intarg;
+
+ agpgart_softstate_t *softstate;
+
+ instance = AGP_DEV2INST(dev);
+ softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+ if (softstate == NULL) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err"));
+ return (ENXIO);
+ }
+
+ mutex_enter(&softstate->asoft_instmutex);
+
+ switch (cmd) {
+ case AGPIOC_INFO:
+ retval = ioctl_agpgart_info(softstate, arg, flags);
+ break;
+ case AGPIOC_ACQUIRE:
+ retval = ioctl_agpgart_acquire(softstate);
+ break;
+ case AGPIOC_RELEASE:
+ retval = ioctl_agpgart_release(softstate);
+ break;
+ case AGPIOC_SETUP:
+ retval = ioctl_agpgart_setup(softstate, arg, flags);
+ break;
+ case AGPIOC_ALLOCATE:
+ retval = ioctl_agpgart_alloc(softstate, arg, flags);
+ break;
+ case AGPIOC_DEALLOCATE:
+ retval = ioctl_agpgart_dealloc(softstate, intarg);
+ break;
+ case AGPIOC_BIND:
+ retval = ioctl_agpgart_bind(softstate, arg, flags);
+ break;
+ case AGPIOC_UNBIND:
+ retval = ioctl_agpgart_unbind(softstate, arg, flags);
+ break;
+ case AGPIOC_FLUSHCHIPSET:
+ retval = ioctl_agpgart_flush_chipset(softstate);
+ break;
+ case AGPIOC_PAGES_BIND:
+ retval = ioctl_agpgart_pages_bind(softstate, arg, flags);
+ break;
+ case AGPIOC_PAGES_UNBIND:
+ retval = ioctl_agpgart_pages_unbind(softstate, arg, flags);
+ break;
+ case AGPIOC_PAGES_REBIND:
+ retval = ioctl_agpgart_pages_rebind(softstate);
+ break;
+ default:
+		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: unknown command"));
+ retval = ENXIO;
+ break;
+ }
+
+ mutex_exit(&softstate->asoft_instmutex);
+ return (retval);
+}
+
+static int
+agpgart_segmap(dev_t dev, off_t off, struct as *asp,
+ caddr_t *addrp, off_t len, unsigned int prot,
+ unsigned int maxprot, unsigned int flags, cred_t *credp)
+{
+
+ struct agpgart_softstate *softstate;
+ int instance;
+ int rc = 0;
+
+ instance = AGP_DEV2INST(dev);
+ softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+ if (softstate == NULL) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err"));
+ return (ENXIO);
+ }
+ if (!AGP_ALIGNED(len))
+ return (EINVAL);
+
+ mutex_enter(&softstate->asoft_instmutex);
+
+ rc = devmap_setup(dev, (offset_t)off, asp, addrp,
+ (size_t)len, prot, maxprot, flags, credp);
+
+ mutex_exit(&softstate->asoft_instmutex);
+ return (rc);
+}
+
+/*ARGSUSED*/
+static int
+agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len,
+ size_t *mappedlen, uint_t model)
+{
+ struct agpgart_softstate *softstate;
+ int instance, status;
+ struct keytable_ent *mementry;
+ offset_t local_offset;
+
+ instance = AGP_DEV2INST(dev);
+ softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
+ if (softstate == NULL) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err"));
+ return (ENXIO);
+ }
+
+ if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) {
+ AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large"));
+ return (EINVAL);
+ }
+
+	/*
+	 * Fail if no bound memory covers this offset.
+	 */
+
+ mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
+
+ if (mementry == NULL) {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_devmap: can not find the proper keyent"));
+ return (EINVAL);
+ }
+
+ local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff);
+
+ if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) {
+ len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset;
+ }
+
+ switch (mementry->kte_type) {
+ case AGP_NORMAL:
+ if (PMEMP(mementry->kte_memhdl)->pmem_cookie) {
+ status = devmap_pmem_setup(cookie,
+ softstate->asoft_dip,
+ &agp_devmap_cb,
+ PMEMP(mementry->kte_memhdl)->pmem_cookie,
+ local_offset,
+ len, PROT_ALL,
+ (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE),
+ &mem_dev_acc_attr);
+ } else {
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_devmap: not a valid memory type"));
+ return (EINVAL);
+
+ }
+
+ break;
+ default:
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_devmap: not a valid memory type"));
+ return (EINVAL);
+ }
+
+ if (status == 0) {
+ *mappedlen = len;
+ } else {
+ *mappedlen = 0;
+ AGPDB_PRINT2((CE_WARN,
+ "agpgart_devmap: devmap interface failed"));
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static struct cb_ops agpgart_cb_ops = {
+ agpgart_open, /* open() */
+ agpgart_close, /* close() */
+ nodev, /* strategy() */
+ nodev, /* print routine */
+ nodev, /* no dump routine */
+ nodev, /* read() */
+ nodev, /* write() */
+ agpgart_ioctl, /* agpgart_ioctl */
+ agpgart_devmap, /* devmap routine */
+ nodev, /* no longer use mmap routine */
+ agpgart_segmap, /* system segmap routine */
+ nochpoll, /* no chpoll routine */
+ ddi_prop_op, /* system prop operations */
+ 0, /* not a STREAMS driver */
+ D_DEVMAP | D_MP, /* safe for multi-thread/multi-processor */
+	CB_REV, /* cb_rev */
+ nodev, /* cb_aread() */
+ nodev, /* cb_awrite() */
+};
+
+static struct dev_ops agpgart_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ agpgart_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ agpgart_attach, /* devo_attach */
+ agpgart_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ &agpgart_cb_ops, /* devo_cb_ops */
+ (struct bus_ops *)0, /* devo_bus_ops */
+ NULL, /* devo_power */
+ ddi_quiesce_not_needed, /* devo_quiesce */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops,
+ "AGP driver",
+ &agpgart_ops,
+};
+
+static struct modlinkage modlinkage = {
+	MODREV_1, /* ml_rev */
+ {&modldrv, NULL, NULL, NULL}
+};
+
+static void *agpgart_glob_soft_handle;
+
+int
+_init(void)
+{
+ int ret = DDI_SUCCESS;
+
+ ret = ddi_soft_state_init(&agpgart_glob_soft_handle,
+ sizeof (agpgart_softstate_t),
+ AGPGART_MAX_INSTANCES);
+
+ if (ret != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "_init: soft state init error code=0x%x", ret));
+ return (ret);
+ }
+
+ if ((ret = mod_install(&modlinkage)) != 0) {
+ AGPDB_PRINT2((CE_WARN,
+ "_init: mod install error code=0x%x", ret));
+ ddi_soft_state_fini(&agpgart_glob_soft_handle);
+ return (ret);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int ret;
+
+ if ((ret = mod_remove(&modlinkage)) == 0) {
+ ddi_soft_state_fini(&agpgart_glob_soft_handle);
+ }
+
+ return (ret);
+}
diff --git a/usr/src/uts/intel/io/agpgart/agpgart.conf b/usr/src/uts/intel/io/agpgart/agpgart.conf
new file mode 100644
index 0000000..0223702
--- /dev/null
+++ b/usr/src/uts/intel/io/agpgart/agpgart.conf
@@ -0,0 +1,8 @@
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+name="agpgart" class="root";
diff --git a/usr/src/uts/intel/io/agpgart/agptarget.c b/usr/src/uts/intel/io/agpgart/agptarget.c
new file mode 100644
index 0000000..d1366d6
--- /dev/null
+++ b/usr/src/uts/intel/io/agpgart/agptarget.c
@@ -0,0 +1,953 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
+ * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/modctl.h>
+#include <sys/sunldi.h>
+#include <sys/pci.h>
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agptarget_io.h>
+
+int agptarget_debug_var = 0;
+#define TARGETDB_PRINT2(fmt) if (agptarget_debug_var >= 1) cmn_err fmt
+#define INST2NODENUM(inst) (inst)
+#define DEV2INST(dev) (getminor(dev))
+
+#define I915_IFPADDR 0x60
+#define I965_IFPADDR 0x70
+
+#define HIADDR(n) ((uint32_t)(((uint64_t)(n) & \
+ 0xFFFFFFFF00000000ULL) >> 32))
+#define LOADDR(n) ((uint32_t)((uint64_t)(n) & 0x00000000FFFFFFFF))
+
+typedef struct agp_target_softstate {
+ dev_info_t *tsoft_dip;
+ ddi_acc_handle_t tsoft_pcihdl;
+ uint32_t tsoft_devid;
+ /* The offset of the ACAPID register */
+ off_t tsoft_acaptr;
+ kmutex_t tsoft_lock;
+ int tsoft_gms_off; /* GMS offset in config */
+ uint32_t tsoft_gms;
+} agp_target_softstate_t;
+
+/*
+ * To get the pre-allocated graphics mem size using Graphics Mode Select
+ * (GMS) value.
+ */
+typedef struct gms_mode {
+ uint32_t gm_devid; /* bridge vendor + device id */
+ off_t gm_regoff; /* mode selection register offset */
+ uint32_t gm_mask; /* GMS mask */
+ uint32_t gm_num; /* number of modes in gm_vec */
+ int *gm_vec; /* modes array */
+} gms_mode_t;
+
+static void *agptarget_glob_soft_handle;
+
+#define GETSOFTC(instance) ((agp_target_softstate_t *) \
+    ddi_get_soft_state(agptarget_glob_soft_handle, instance))
+
+/*
+ * The AMD8151 bridge is the only supported 64 bit hardware
+ */
+static int
+is_64bit_aper(agp_target_softstate_t *softstate)
+{
+ return (softstate->tsoft_devid == AMD_BR_8151);
+}
+
+/*
+ * Check if it is an intel bridge
+ */
+static int
+is_intel_br(agp_target_softstate_t *softstate)
+{
+ return ((softstate->tsoft_devid & VENDOR_ID_MASK) ==
+ INTEL_VENDOR_ID);
+}
+
+/*
+ * agp_target_cap_find()
+ *
+ * Description:
+ *	This function searches the linked capability list to find the offset
+ *	of the AGP capability register. If it is not found, 0 is returned.
+ *	This works for standard AGP chipsets, but not for some Intel chipsets
+ *	such as the I830M/I830MP/I852PM/I852GME/I855GME, which return 0 even
+ *	though AGP is supported. For those chipsets the offset of acapid
+ *	must be set manually.
+ *
+ * Arguments:
+ * pci_handle ddi acc handle of pci config
+ *
+ * Returns:
+ *	0		No capability pointer register found
+ *	nextcap		The AGP capability pointer register offset
+ */
+static off_t
+agp_target_cap_find(ddi_acc_handle_t pci_handle)
+{
+ off_t nextcap = 0;
+ uint32_t ncapid = 0;
+ uint8_t value = 0;
+
+ /* Check if this device supports the capability pointer */
+ value = (uint8_t)(pci_config_get16(pci_handle, PCI_CONF_STAT)
+ & PCI_CONF_CAP_MASK);
+
+ if (!value)
+ return (0);
+ /* Get the offset of the first capability pointer from CAPPTR */
+ nextcap = (off_t)(pci_config_get8(pci_handle, AGP_CONF_CAPPTR));
+
+ /* Check the AGP capability from the first capability pointer */
+ while (nextcap) {
+ ncapid = pci_config_get32(pci_handle, nextcap);
+ /*
+		 * The AGP capability ID is assigned by the PCI SIG
+		 * (AGP 3.0 rev 1.0, p. 127; Intel 845 data sheet, p. 69).
+ */
+ if ((ncapid & PCI_CONF_CAPID_MASK) ==
+ AGP_CAP_ID) /* The AGP cap was found */
+ break;
+
+ nextcap = (off_t)((ncapid & PCI_CONF_NCAPID_MASK) >> 8);
+ }
+
+	return (nextcap);
+}
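+
+/*
+ * Example (sketch): once the capability offset is known, the AGP major
+ * and minor revision can be read from bits 23:16 of the register at that
+ * offset, as the AGP_TARGET_GETINFO ioctl does later in this file:
+ *
+ *	uint32_t value = pci_config_get32(pci_handle, cap);
+ *	uint16_t major = (uint16_t)((value >> 20) & 0xf);
+ *	uint16_t minor = (uint16_t)((value >> 16) & 0xf);
+ */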
+
+/*
+ * agp_target_get_aperbase()
+ *
+ * Description:
+ * This function gets the AGP aperture base address from the AGP target
+ *	register; the aperture base register is programmed by the BIOS.
+ *
+ * Arguments:
+ * softstate driver soft state pointer
+ *
+ * Returns:
+ * aper_base AGP aperture base address
+ *
+ * Notes:
+ *	If a 64-bit bridge device is present, the AGP aperture base
+ *	address can be 64 bits wide.
+ */
+static uint64_t
+agp_target_get_apbase(agp_target_softstate_t *softstate)
+{
+ uint64_t aper_base;
+
+ if (is_intel_br(softstate)) {
+ aper_base = pci_config_get32(softstate->tsoft_pcihdl,
+ AGP_CONF_APERBASE) & AGP_32_APERBASE_MASK;
+ } else if (is_64bit_aper(softstate)) {
+ aper_base = pci_config_get64(softstate->tsoft_pcihdl,
+ AGP_CONF_APERBASE);
+		/* 32-bit or 64-bit aperture base pointer */
+ if ((aper_base & AGP_APER_TYPE_MASK) == 0)
+ aper_base &= AGP_32_APERBASE_MASK;
+ else
+ aper_base &= AGP_64_APERBASE_MASK;
+ }
+
+ return (aper_base);
+}
+
+/*
+ * agp_target_get_apsize()
+ *
+ * Description:
+ * This function gets the AGP aperture size by reading the AGP aperture
+ * size register.
+ * Arguments:
+ * softstate driver soft state pointer
+ *
+ * Return:
+ * size The AGP aperture size in megabytes
+ * 0 an unexpected error
+ */
+static size_t
+agp_target_get_apsize(agp_target_softstate_t *softstate)
+{
+ off_t cap;
+	uint16_t value = 0;
+ size_t size, regsize;
+
+ ASSERT(softstate->tsoft_acaptr);
+ cap = softstate->tsoft_acaptr;
+
+ if (is_intel_br(softstate)) {
+ /* extend this value to 16 bit for later tests */
+ value = (uint16_t)pci_config_get8(softstate->tsoft_pcihdl,
+ cap + AGP_CONF_APERSIZE) | AGP_APER_SIZE_MASK;
+ } else if (is_64bit_aper(softstate)) {
+ value = pci_config_get16(softstate->tsoft_pcihdl,
+ cap + AGP_CONF_APERSIZE);
+ }
+
+ if (value & AGP_APER_128M_MASK) {
+ switch (value & AGP_APER_128M_MASK) {
+ case AGP_APER_4M:
+ size = 4; /* 4M */
+ break;
+ case AGP_APER_8M:
+ size = 8; /* 8M */
+ break;
+ case AGP_APER_16M:
+ size = 16; /* 16M */
+ break;
+ case AGP_APER_32M:
+ size = 32; /* 32M */
+ break;
+ case AGP_APER_64M:
+ size = 64; /* 64M */
+ break;
+ case AGP_APER_128M:
+ size = 128; /* 128M */
+ break;
+ default:
+			size = 0; /* invalid encoding */
+ }
+ } else {
+ switch (value & AGP_APER_4G_MASK) {
+ case AGP_APER_256M:
+ size = 256; /* 256 M */
+ break;
+ case AGP_APER_512M:
+ size = 512; /* 512 M */
+ break;
+ case AGP_APER_1024M:
+ size = 1024; /* 1024 M */
+ break;
+ case AGP_APER_2048M:
+ size = 2048; /* 2048 M */
+ break;
+ case AGP_APER_4G:
+ size = 4096; /* 4096 M */
+ break;
+ default:
+			size = 0; /* invalid encoding */
+ }
+ }
+	/*
+	 * Some chipsets have no APSIZE register, so a size value of 256M
+	 * could be wrong. Verify it against the size of the aperture BAR
+	 * set up in the PCI configuration space.
+	 */
+ if (size == 256) {
+ if (ddi_dev_regsize(softstate->tsoft_dip,
+ AGP_TARGET_BAR1, (off_t *)&regsize) == DDI_FAILURE)
+ return (0);
+
+ if (MB2BYTES(size) != regsize) {
+ TARGETDB_PRINT2((CE_WARN,
+ "APSIZE 256M doesn't match regsize %lx",
+ regsize));
+ TARGETDB_PRINT2((CE_WARN, "Use regsize instead"));
+ size = BYTES2MB(regsize);
+ }
+ }
+
+ return (size);
+}
+
+static void
+agp_target_set_gartaddr(agp_target_softstate_t *softstate, uint32_t gartaddr)
+{
+ ASSERT(softstate->tsoft_acaptr);
+
+ /* Disable the GTLB for Intel chipsets */
+ pci_config_put16(softstate->tsoft_pcihdl,
+ softstate->tsoft_acaptr + AGP_CONF_CONTROL, 0x0000);
+
+ pci_config_put32(softstate->tsoft_pcihdl,
+ softstate->tsoft_acaptr + AGP_CONF_ATTBASE,
+ gartaddr & AGP_ATTBASE_MASK);
+}
+
+/*
+ * Pre-allocated graphics memory for every type of Intel north bridge, mem size
+ * are specified in kbytes.
+ */
+#define GMS_MB(n) ((n) * 1024)
+#define GMS_SHIFT 4
+#define GMS_SIZE(a) (sizeof (a) / sizeof (int))
+
+/*
+ * Since a GMS value of zero always means "no memory pre-allocated",
+ * (GMS - 1) is used to index these arrays; i.e. gms_xxx[0] holds the
+ * memory size (in KB) that GMS value 0x1 corresponds to.
+ *
+ * All "reserved" GMS values are assumed to mean zero bytes of
+ * pre-allocated graphics memory, unless special BIOS settings exist.
+ */
+static int gms_810[12] = {0, 0, 0, 0, 0, 0, 0, 512, 0, 0, 0, GMS_MB(1)};
+static int gms_830_845[4] = {0, 512, GMS_MB(1), GMS_MB(8)};
+static int gms_855GM[5] = {GMS_MB(1), GMS_MB(4), GMS_MB(8), GMS_MB(16),
+ GMS_MB(32)};
+/* There is no 16M mode in the datasheet, but some BIOSes add it. */
+static int gms_865_915GM[4] = {GMS_MB(1), 0, GMS_MB(8), GMS_MB(16)};
+static int gms_915_945_965[3] = {GMS_MB(1), 0, GMS_MB(8)};
+static int gms_965GM[7] = {GMS_MB(1), GMS_MB(4), GMS_MB(8), GMS_MB(16),
+ GMS_MB(32), GMS_MB(48), GMS_MB(64)};
+static int gms_X33[9] = {GMS_MB(1), GMS_MB(4), GMS_MB(8), GMS_MB(16),
+ GMS_MB(32), GMS_MB(48), GMS_MB(64), GMS_MB(128), GMS_MB(256)};
+static int gms_G4X[13] = {0, 0, 0, 0,
+ GMS_MB(32), GMS_MB(48), GMS_MB(64), GMS_MB(128), GMS_MB(256),
+ GMS_MB(96), GMS_MB(160), GMS_MB(224), GMS_MB(352)};
+
+static gms_mode_t gms_modes[] = {
+ {INTEL_BR_810, I810_CONF_SMRAM, I810_GMS_MASK,
+ GMS_SIZE(gms_810), gms_810},
+ {INTEL_BR_810DC, I810_CONF_SMRAM, I810_GMS_MASK,
+ GMS_SIZE(gms_810), gms_810},
+ {INTEL_BR_810E, I810_CONF_SMRAM, I810_GMS_MASK,
+ GMS_SIZE(gms_810), gms_810},
+ {INTEL_BR_830M, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_830_845), gms_830_845},
+ {INTEL_BR_845, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_830_845), gms_830_845},
+ {INTEL_BR_855GM, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_855GM), gms_855GM},
+ {INTEL_BR_865, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_865_915GM), gms_865_915GM},
+ {INTEL_BR_915GM, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_865_915GM), gms_865_915GM},
+ {INTEL_BR_915, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_915_945_965), gms_915_945_965},
+ {INTEL_BR_945, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_915_945_965), gms_915_945_965},
+ {INTEL_BR_945GM, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_915_945_965), gms_915_945_965},
+ {INTEL_BR_945GME, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_915_945_965), gms_915_945_965},
+ {INTEL_BR_946GZ, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_915_945_965), gms_915_945_965},
+ {INTEL_BR_965G1, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_915_945_965), gms_915_945_965},
+ {INTEL_BR_965G2, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_915_945_965), gms_915_945_965},
+ {INTEL_BR_965Q, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_915_945_965), gms_915_945_965},
+ {INTEL_BR_965GM, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_965GM), gms_965GM},
+ {INTEL_BR_965GME, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_965GM), gms_965GM},
+ {INTEL_BR_Q35, I8XX_CONF_GC, IX33_GC_MODE_MASK,
+ GMS_SIZE(gms_X33), gms_X33},
+ {INTEL_BR_G33, I8XX_CONF_GC, IX33_GC_MODE_MASK,
+ GMS_SIZE(gms_X33), gms_X33},
+ {INTEL_BR_Q33, I8XX_CONF_GC, IX33_GC_MODE_MASK,
+ GMS_SIZE(gms_X33), gms_X33},
+ {INTEL_BR_GM45, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_965GM), gms_965GM},
+ {INTEL_BR_EL, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X},
+ {INTEL_BR_Q45, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X},
+ {INTEL_BR_G45, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X},
+ {INTEL_BR_G41, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X},
+ {INTEL_BR_IGDNG_D, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X},
+ {INTEL_BR_IGDNG_M, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X},
+ {INTEL_BR_IGDNG_MA, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X},
+ {INTEL_BR_IGDNG_MC2, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X},
+ {INTEL_BR_B43, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
+ GMS_SIZE(gms_G4X), gms_G4X}
+};
+
+static int
+get_chip_gms(uint32_t devid)
+{
+ int num_modes;
+ int i;
+
+ num_modes = (sizeof (gms_modes) / sizeof (gms_mode_t));
+
+ for (i = 0; i < num_modes; i++) {
+ if (gms_modes[i].gm_devid == devid)
+ break;
+ }
+
+ return ((i == num_modes) ? -1 : i);
+}
+
+/* Returns the size (kbytes) of pre-allocated graphics memory */
+static size_t
+i8xx_biosmem_detect(agp_target_softstate_t *softstate)
+{
+ uint8_t memval;
+ size_t kbytes;
+ int gms_off;
+
+ kbytes = 0;
+ gms_off = softstate->tsoft_gms_off;
+
+ /* fetch the GMS value from DRAM controller */
+ memval = pci_config_get8(softstate->tsoft_pcihdl,
+ gms_modes[gms_off].gm_regoff);
+ TARGETDB_PRINT2((CE_NOTE, "i8xx_biosmem_detect: memval = %x", memval));
+ memval = (memval & gms_modes[gms_off].gm_mask) >> GMS_SHIFT;
+	/* assume zero bytes for 0 or "reserved" GMS values */
+ if (memval == 0 || memval > gms_modes[gms_off].gm_num) {
+ TARGETDB_PRINT2((CE_WARN, "i8xx_biosmem_detect: "
+		    "devid = %x, GMS = %x. assuming zero bytes of "
+ "pre-allocated memory",
+ gms_modes[gms_off].gm_devid, memval));
+ goto done;
+ }
+ memval--; /* use (GMS_value - 1) as index */
+ kbytes = (gms_modes[gms_off].gm_vec)[memval];
+
+done:
+ TARGETDB_PRINT2((CE_NOTE,
+ "i8xx_biosmem_detect: %ldKB BIOS pre-allocated memory detected",
+ kbytes));
+ return (kbytes);
+}
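+
+/*
+ * Worked example: on an 855GM (gm_vec == gms_855GM), a GMS field of 0x3
+ * indexes gms_855GM[2], i.e. GMS_MB(8) = 8192KB of pre-allocated memory.
+ */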
+
+/*ARGSUSED*/
+static int
+agptarget_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
+    void *arg, void **resultp)
+{
+ agp_target_softstate_t *st;
+ int instance, rval = DDI_FAILURE;
+ dev_t dev;
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ dev = (dev_t)arg;
+ instance = DEV2INST(dev);
+ st = ddi_get_soft_state(agptarget_glob_soft_handle, instance);
+ if (st != NULL) {
+ mutex_enter(&st->tsoft_lock);
+ *resultp = st->tsoft_dip;
+ mutex_exit(&st->tsoft_lock);
+ rval = DDI_SUCCESS;
+ } else
+ *resultp = NULL;
+
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ dev = (dev_t)arg;
+ instance = DEV2INST(dev);
+ *resultp = (void *)(uintptr_t)instance;
+		rval = DDI_SUCCESS;
+		break;
+	default:
+ break;
+ }
+
+ return (rval);
+}
+
+static int
+intel_br_resume(agp_target_softstate_t *softstate)
+{
+ int gms_off;
+
+ gms_off = softstate->tsoft_gms_off;
+
+	/*
+	 * Restore the GMCH graphics control register that was saved
+	 * at suspend time.
+	 */
+ pci_config_put16(softstate->tsoft_pcihdl,
+ gms_modes[gms_off].gm_regoff, softstate->tsoft_gms);
+
+ return (DDI_SUCCESS);
+}
+
+static int
+intel_br_suspend(agp_target_softstate_t *softstate)
+{
+ int gms_off;
+
+ gms_off = softstate->tsoft_gms_off;
+ softstate->tsoft_gms = pci_config_get16(softstate->tsoft_pcihdl,
+ gms_modes[gms_off].gm_regoff);
+
+ return (DDI_SUCCESS);
+}
+
+static int
+agp_target_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ agp_target_softstate_t *softstate;
+ int instance;
+ int status;
+
+ instance = ddi_get_instance(dip);
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ break;
+ case DDI_RESUME:
+ softstate =
+ ddi_get_soft_state(agptarget_glob_soft_handle, instance);
+ return (intel_br_resume(softstate));
+ default:
+ TARGETDB_PRINT2((CE_WARN, "agp_target_attach:"
+ "only attach and resume ops are supported"));
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_soft_state_zalloc(agptarget_glob_soft_handle,
+ instance) != DDI_SUCCESS) {
+ TARGETDB_PRINT2((CE_WARN, "agp_target_attach:"
+ "soft state zalloc failed"));
+ return (DDI_FAILURE);
+ }
+
+ softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);
+ mutex_init(&softstate->tsoft_lock, NULL, MUTEX_DRIVER, NULL);
+ softstate->tsoft_dip = dip;
+ status = pci_config_setup(dip, &softstate->tsoft_pcihdl);
+ if (status != DDI_SUCCESS) {
+ TARGETDB_PRINT2((CE_WARN, "agp_target_attach:"
+ "pci config setup failed"));
+ ddi_soft_state_free(agptarget_glob_soft_handle,
+ instance);
+ return (DDI_FAILURE);
+ }
+
+ softstate->tsoft_devid = pci_config_get32(softstate->tsoft_pcihdl,
+ PCI_CONF_VENID);
+ softstate->tsoft_gms_off = get_chip_gms(softstate->tsoft_devid);
+ if (softstate->tsoft_gms_off < 0) {
+ TARGETDB_PRINT2((CE_WARN, "agp_target_attach:"
+ "read gms offset failed"));
+ pci_config_teardown(&softstate->tsoft_pcihdl);
+ ddi_soft_state_free(agptarget_glob_soft_handle,
+ instance);
+ return (DDI_FAILURE);
+ }
+ softstate->tsoft_acaptr = agp_target_cap_find(softstate->tsoft_pcihdl);
+ if (softstate->tsoft_acaptr == 0) {
+ /* Make a correction for some Intel chipsets */
+ if (is_intel_br(softstate))
+ softstate->tsoft_acaptr = AGP_CAP_OFF_DEF;
+ else {
+			TARGETDB_PRINT2((CE_WARN, "agp_target_attach: "
+			    "no supported AGP capability correction"));
+ pci_config_teardown(&softstate->tsoft_pcihdl);
+ ddi_soft_state_free(agptarget_glob_soft_handle,
+ instance);
+ return (DDI_FAILURE);
+ }
+ }
+
+ status = ddi_create_minor_node(dip, AGPTARGET_NAME, S_IFCHR,
+ INST2NODENUM(instance), DDI_NT_AGP_TARGET, 0);
+
+ if (status != DDI_SUCCESS) {
+ TARGETDB_PRINT2((CE_WARN, "agp_target_attach:"
+ "Create minor node failed"));
+ pci_config_teardown(&softstate->tsoft_pcihdl);
+ ddi_soft_state_free(agptarget_glob_soft_handle, instance);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+agp_target_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance;
+ agp_target_softstate_t *softstate;
+
+ instance = ddi_get_instance(dip);
+ softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);
+
+ if (cmd == DDI_SUSPEND) {
+		/* save the graphics mode select value for resume */
+ return (intel_br_suspend(softstate));
+ }
+
+ if (cmd != DDI_DETACH) {
+ TARGETDB_PRINT2((CE_WARN, "agp_target_detach:"
+ "only detach and suspend ops are supported"));
+ return (DDI_FAILURE);
+ }
+
+ ddi_remove_minor_node(dip, AGPTARGET_NAME);
+ pci_config_teardown(&softstate->tsoft_pcihdl);
+ mutex_destroy(&softstate->tsoft_lock);
+ ddi_soft_state_free(agptarget_glob_soft_handle, instance);
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+agp_target_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
+ cred_t *cred, int *rval)
+{
+ int instance = DEV2INST(dev);
+ agp_target_softstate_t *st;
+	static char kernel_only[] =
+	    "agp_target_ioctl: is a kernel only ioctl";
+
+ if (!(mode & FKIOCTL)) {
+ TARGETDB_PRINT2((CE_CONT, kernel_only));
+ return (ENXIO);
+ }
+ st = GETSOFTC(instance);
+
+ if (st == NULL)
+ return (ENXIO);
+
+ mutex_enter(&st->tsoft_lock);
+
+ switch (cmd) {
+ case CHIP_DETECT:
+ {
+ int type = 0;
+
+ if (is_intel_br(st))
+ type = CHIP_IS_INTEL;
+ else if (is_64bit_aper(st))
+ type = CHIP_IS_AMD;
+ else {
+ type = 0;
+ TARGETDB_PRINT2((CE_WARN, "Unknown bridge!"));
+ }
+
+ if (ddi_copyout(&type, (void *)data, sizeof (int), mode)) {
+ mutex_exit(&st->tsoft_lock);
+ return (EFAULT);
+ }
+
+ break;
+ }
+ case I8XX_GET_PREALLOC_SIZE:
+ {
+ size_t prealloc_size;
+
+ if (!is_intel_br(st)) {
+ mutex_exit(&st->tsoft_lock);
+ return (EINVAL);
+ }
+
+ prealloc_size = i8xx_biosmem_detect(st);
+ if (ddi_copyout(&prealloc_size, (void *)data,
+ sizeof (size_t), mode)) {
+ mutex_exit(&st->tsoft_lock);
+ return (EFAULT);
+ }
+
+ break;
+ }
+ case AGP_TARGET_GETINFO:
+ {
+ i_agp_info_t info;
+ uint32_t value;
+ off_t cap;
+
+ ASSERT(st->tsoft_acaptr);
+
+ cap = st->tsoft_acaptr;
+ value = pci_config_get32(st->tsoft_pcihdl, cap);
+ info.iagp_ver.agpv_major = (uint16_t)((value >> 20) & 0xf);
+ info.iagp_ver.agpv_minor = (uint16_t)((value >> 16) & 0xf);
+ info.iagp_devid = st->tsoft_devid;
+ info.iagp_mode = pci_config_get32(st->tsoft_pcihdl,
+ cap + AGP_CONF_STATUS);
+ info.iagp_aperbase = agp_target_get_apbase(st);
+ info.iagp_apersize = agp_target_get_apsize(st);
+
+ if (ddi_copyout(&info, (void *)data,
+ sizeof (i_agp_info_t), mode)) {
+ mutex_exit(&st->tsoft_lock);
+ return (EFAULT);
+ }
+ break;
+
+ }
+ /*
+ * This ioctl is only for Intel AGP chipsets.
+ * It is not necessary for the AMD8151 AGP bridge, because
+	 * this register in the AMD8151 does not control any hardware;
+	 * it is only provided for compatibility with an Intel AGP bridge.
+	 * See the AMD8151 data sheet, page 24, "AGP device GART pointer".
+ */
+ case AGP_TARGET_SET_GATTADDR:
+ {
+ uint32_t gartaddr;
+
+ if (ddi_copyin((void *)data, &gartaddr,
+ sizeof (uint32_t), mode)) {
+ mutex_exit(&st->tsoft_lock);
+ return (EFAULT);
+ }
+
+ agp_target_set_gartaddr(st, gartaddr);
+ break;
+ }
+ case AGP_TARGET_SETCMD:
+ {
+ uint32_t command;
+
+ if (ddi_copyin((void *)data, &command,
+ sizeof (uint32_t), mode)) {
+ mutex_exit(&st->tsoft_lock);
+ return (EFAULT);
+ }
+
+ ASSERT(st->tsoft_acaptr);
+
+ pci_config_put32(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_COMMAND,
+ command);
+ break;
+
+ }
+ case AGP_TARGET_FLUSH_GTLB:
+ {
+ uint16_t value;
+
+ ASSERT(st->tsoft_acaptr);
+
+ value = pci_config_get16(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_CONTROL);
+ value &= ~AGPCTRL_GTLBEN;
+ pci_config_put16(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_CONTROL, value);
+ value |= AGPCTRL_GTLBEN;
+ pci_config_put16(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_CONTROL, value);
+
+ break;
+ }
+ case AGP_TARGET_CONFIGURE:
+ {
+ uint8_t value;
+
+ ASSERT(st->tsoft_acaptr);
+
+ /*
+		 * On Intel AGP bridges the AGP misc register offset is
+		 * relative to the start of config space rather than to
+		 * the capability register. AMD AGP bridges have no such
+		 * misc register controlling aperture access; similar
+		 * registers live in the CPU GART devices instead.
+ */
+
+ if (is_intel_br(st)) {
+ value = pci_config_get8(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_MISC);
+ value |= AGP_MISC_APEN;
+ pci_config_put8(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_MISC, value);
+ }
+ break;
+
+ }
+ case AGP_TARGET_UNCONFIG:
+ {
+ uint32_t value1;
+ uint8_t value2;
+
+ ASSERT(st->tsoft_acaptr);
+
+ pci_config_put16(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_CONTROL, 0x0);
+
+ if (is_intel_br(st)) {
+ value2 = pci_config_get8(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_MISC);
+ value2 &= ~AGP_MISC_APEN;
+ pci_config_put8(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_MISC, value2);
+ }
+
+ value1 = pci_config_get32(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_COMMAND);
+ value1 &= ~AGPCMD_AGPEN;
+ pci_config_put32(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_COMMAND,
+ value1);
+
+ pci_config_put32(st->tsoft_pcihdl,
+ st->tsoft_acaptr + AGP_CONF_ATTBASE, 0x0);
+
+ break;
+ }
+
+ case INTEL_CHIPSET_FLUSH_SETUP:
+ case INTEL_CHIPSET_FLUSH:
+ case INTEL_CHIPSET_FLUSH_FREE:
+ break;
+ default:
+ mutex_exit(&st->tsoft_lock);
+ return (ENXIO);
+ } /* end switch */
+
+ mutex_exit(&st->tsoft_lock);
+
+ return (0);
+}
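+
+/*
+ * Example (sketch): these ioctls are kernel-only (FKIOCTL), so they are
+ * reached through a layered handle, e.g. from agpgart (target_hdl here
+ * is the caller's LDI handle to this target device):
+ *
+ *	i_agp_info_t info;
+ *	int rval;
+ *
+ *	(void) ldi_ioctl(target_hdl, AGP_TARGET_GETINFO, (intptr_t)&info,
+ *	    FKIOCTL, kcred, &rval);
+ */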
+
+/*ARGSUSED*/
+static int
+agp_target_open(dev_t *devp, int flag, int otyp, cred_t *cred)
+{
+ int instance = DEV2INST(*devp);
+ agp_target_softstate_t *st;
+
+ if (!(flag & FKLYR))
+ return (ENXIO);
+
+ st = GETSOFTC(instance);
+
+ if (st == NULL)
+ return (ENXIO);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+agp_target_close(dev_t dev, int flag, int otyp, cred_t *cred)
+{
+ int instance = DEV2INST(dev);
+ agp_target_softstate_t *st;
+
+ st = GETSOFTC(instance);
+
+ if (st == NULL)
+ return (ENXIO);
+
+ return (0);
+}
+
+static struct cb_ops agp_target_cb_ops = {
+ agp_target_open, /* cb_open */
+ agp_target_close, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read() */
+ nodev, /* cb_write() */
+ agp_target_ioctl, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ 0, /* cb_stream */
+ D_NEW | D_MP, /* cb_flag */
+	CB_REV, /* cb_rev */
+ nodev, /* cb_aread() */
+ nodev, /* cb_awrite() */
+};
+
+/* device operations */
+static struct dev_ops agp_target_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ agptarget_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ agp_target_attach, /* devo_attach */
+ agp_target_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ &agp_target_cb_ops, /* devo_cb_ops */
+ 0, /* devo_bus_ops */
+ 0, /* devo_power */
+ ddi_quiesce_not_needed, /* devo_quiesce */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops,
+ "AGP target driver",
+ &agp_target_ops,
+};
+
+static struct modlinkage modlinkage = {
+	MODREV_1, /* ml_rev */
+ {&modldrv, NULL, NULL, NULL}
+};
+
+int
+_init(void)
+{
+ int ret;
+
+ ret = ddi_soft_state_init(&agptarget_glob_soft_handle,
+ sizeof (agp_target_softstate_t), 1);
+
+ if (ret)
+ goto err1;
+
+ if ((ret = mod_install(&modlinkage)) != 0) {
+ goto err2;
+ }
+
+ return (DDI_SUCCESS);
+err2:
+ ddi_soft_state_fini(&agptarget_glob_soft_handle);
+err1:
+ return (ret);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int ret;
+
+ if ((ret = mod_remove(&modlinkage)) == 0) {
+ ddi_soft_state_fini(&agptarget_glob_soft_handle);
+ }
+ return (ret);
+}
diff --git a/usr/src/uts/intel/io/agpgart/amd64_gart.c b/usr/src/uts/intel/io/agpgart/amd64_gart.c
new file mode 100644
index 0000000..b374c87
--- /dev/null
+++ b/usr/src/uts/intel/io/agpgart/amd64_gart.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunldi.h>
+#include <sys/file.h>
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpamd64gart_io.h>
+
+#define MAX_GART_INSTS 8
+#define GETSOFTC(instance) ((amd64_gart_softstate_t *) \
+	ddi_get_soft_state(amd64_gart_glob_soft_handle, (instance)))
+#define DEV2INST(dev) (getminor(dev))
+#define INST2NODENUM(inst) (inst)
+
+int amd64_debug_var = 0;
+#define AMD64DB_PRINT1(fmt) if (amd64_debug_var == 1) cmn_err fmt
+#define AMD64DB_PRINT2(fmt) if (amd64_debug_var >= 1) cmn_err fmt
+
+typedef struct amd64_gart_softstate {
+ dev_info_t *gsoft_dip;
+ ddi_acc_handle_t gsoft_pcihdl;
+ kmutex_t gsoft_lock;
+} amd64_gart_softstate_t;
+
+static void *amd64_gart_glob_soft_handle;
+
+static uint64_t
+amd64_get_aperbase(amd64_gart_softstate_t *sc)
+{
+ uint32_t value;
+ uint64_t aper_base;
+
+	/* the amd64 aperture base supports 40 bits and is 32M aligned */
+ value = pci_config_get32(sc->gsoft_pcihdl,
+ AMD64_APERTURE_BASE) & AMD64_APERBASE_MASK;
+ aper_base = (uint64_t)value << AMD64_APERBASE_SHIFT;
+ return (aper_base);
+}
+
+static size_t
+amd64_get_apersize(amd64_gart_softstate_t *sc)
+{
+ uint32_t value;
+ size_t size;
+
+ value = pci_config_get32(sc->gsoft_pcihdl, AMD64_APERTURE_CONTROL);
+
+ value = (value & AMD64_APERSIZE_MASK) >> 1;
+
+ /* aper size = 2^value x 32 */
+ switch (value) {
+ case 0x0:
+ size = 32;
+ break;
+ case 0x1:
+ size = 64;
+ break;
+ case 0x2:
+ size = 128;
+ break;
+ case 0x3:
+ size = 256;
+ break;
+ case 0x4:
+ size = 512;
+ break;
+ case 0x5:
+ size = 1024;
+ break;
+ case 0x6:
+ size = 2048;
+ break;
+ default: /* reserved */
+ size = 0;
+	}
+
+ return (size);
+}
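+
+/*
+ * Worked example: the encoding is size = 32MB << value, so a field value
+ * of 0x2 selects a 128MB aperture (32 << 2 == 128).
+ */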
+
+static void
+amd64_invalidate_gtlb(amd64_gart_softstate_t *sc)
+{
+ uint32_t value;
+
+ value = pci_config_get32(sc->gsoft_pcihdl, AMD64_GART_CACHE_CTL);
+ value |= AMD64_INVALID_CACHE;
+
+ pci_config_put32(sc->gsoft_pcihdl, AMD64_GART_CACHE_CTL, value);
+}
+
+static void
+amd64_enable_gart(amd64_gart_softstate_t *sc, int enable)
+{
+ uint32_t aper_ctl;
+ uint32_t aper_base;
+ uint32_t gart_ctl;
+ uint32_t gart_base;
+
+ aper_ctl = pci_config_get32(sc->gsoft_pcihdl, AMD64_APERTURE_CONTROL);
+ AMD64DB_PRINT1((CE_NOTE, "before: aper_ctl = %x", aper_ctl));
+ aper_base = pci_config_get32(sc->gsoft_pcihdl, AMD64_APERTURE_BASE);
+ gart_ctl = pci_config_get32(sc->gsoft_pcihdl, AMD64_GART_CACHE_CTL);
+ gart_base = pci_config_get32(sc->gsoft_pcihdl, AMD64_GART_BASE);
+#ifdef lint
+ aper_base = aper_base;
+ gart_ctl = gart_ctl;
+ gart_base = gart_base;
+#endif /* lint */
+ AMD64DB_PRINT1((CE_NOTE, "before: aper_base = %x", aper_base));
+ AMD64DB_PRINT1((CE_NOTE, "before: gart_ctl = %x", gart_ctl));
+ AMD64DB_PRINT1((CE_NOTE, "before: gart_base = %x", gart_base));
+ if (enable) {
+ aper_ctl |= AMD64_GARTEN;
+ aper_ctl &= ~(AMD64_DISGARTCPU | AMD64_DISGARTIO);
+ } else
+ aper_ctl &= (~AMD64_GARTEN);
+
+ pci_config_put32(sc->gsoft_pcihdl, AMD64_APERTURE_CONTROL, aper_ctl);
+}
+
+/*ARGSUSED*/
+static int
+amd64_gart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
+ void *arg, void **resultp)
+{
+ amd64_gart_softstate_t *st;
+ int instance, rval = DDI_FAILURE;
+ dev_t dev;
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ dev = (dev_t)arg;
+ instance = DEV2INST(dev);
+ st = ddi_get_soft_state(amd64_gart_glob_soft_handle, instance);
+ if (st != NULL) {
+ mutex_enter(&st->gsoft_lock);
+ *resultp = st->gsoft_dip;
+ mutex_exit(&st->gsoft_lock);
+ rval = DDI_SUCCESS;
+ } else {
+ *resultp = NULL;
+ }
+
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ dev = (dev_t)arg;
+ instance = DEV2INST(dev);
+ *resultp = (void *)(uintptr_t)instance;
+ rval = DDI_SUCCESS;
+ break;
+ default:
+ break;
+ }
+
+ return (rval);
+}
+
+static int
+amd64_gart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int instance;
+ amd64_gart_softstate_t *sc;
+ int status;
+ char buf[80];
+
+ switch (cmd) {
+ default:
+ return (DDI_FAILURE);
+
+ case DDI_RESUME:
+ /* Nothing special is needed for resume. */
+ return (DDI_SUCCESS);
+
+ case DDI_ATTACH:
+ break;
+ }
+
+ instance = ddi_get_instance(dip);
+
+ if (ddi_soft_state_zalloc(amd64_gart_glob_soft_handle, instance) !=
+ DDI_SUCCESS)
+ return (DDI_FAILURE);
+
+ sc = ddi_get_soft_state(amd64_gart_glob_soft_handle, instance);
+ mutex_init(&sc->gsoft_lock, NULL, MUTEX_DRIVER, NULL);
+ sc->gsoft_dip = dip;
+ status = pci_config_setup(dip, &sc->gsoft_pcihdl);
+ if (status != DDI_SUCCESS) {
+ ddi_soft_state_free(amd64_gart_glob_soft_handle, instance);
+ return (DDI_FAILURE);
+ }
+ (void) sprintf(buf, "%s-%d", AMD64GART_NAME, instance);
+ status = ddi_create_minor_node(dip, buf, S_IFCHR,
+ INST2NODENUM(instance), DDI_NT_AGP_CPUGART, 0);
+ if (status != DDI_SUCCESS) {
+ pci_config_teardown(&sc->gsoft_pcihdl);
+ ddi_soft_state_free(amd64_gart_glob_soft_handle, instance);
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+amd64_gart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance;
+ amd64_gart_softstate_t *sc;
+ char buf[80];
+
+ switch (cmd) {
+ default:
+ return (DDI_FAILURE);
+
+ case DDI_SUSPEND:
+ /* Nothing special is needed for suspend */
+ return (DDI_SUCCESS);
+
+ case DDI_DETACH:
+ break;
+ }
+
+ instance = ddi_get_instance(dip);
+ sc = ddi_get_soft_state(amd64_gart_glob_soft_handle, instance);
+
+ (void) sprintf(buf, "%s-%d", AMD64GART_NAME, instance);
+ ddi_remove_minor_node(dip, buf);
+ pci_config_teardown(&sc->gsoft_pcihdl);
+ mutex_destroy(&sc->gsoft_lock);
+ ddi_soft_state_free(amd64_gart_glob_soft_handle, instance);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+amd64_gart_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
+ cred_t *cred, int *rval)
+{
+ int instance;
+ amd64_gart_softstate_t *sc;
+ static char kernel_only[] =
+ "amd64_gart_ioctl: is a kernel only ioctl";
+
+ if (!(mode & FKIOCTL)) {
+ AMD64DB_PRINT2((CE_CONT, kernel_only));
+ return (ENXIO);
+ }
+ instance = DEV2INST(dev);
+ sc = GETSOFTC(instance);
+
+ if (sc == NULL)
+ return (ENXIO);
+ mutex_enter(&sc->gsoft_lock);
+
+ switch (cmd) {
+ case AMD64_GET_INFO:
+ {
+ amdgart_info_t info;
+
+ info.cgart_aperbase = amd64_get_aperbase(sc);
+ info.cgart_apersize = amd64_get_apersize(sc);
+
+ if (ddi_copyout(&info, (void *)data,
+ sizeof (amdgart_info_t), mode)) {
+ mutex_exit(&sc->gsoft_lock);
+ return (EFAULT);
+ }
+ break;
+ }
+ case AMD64_SET_GART_ADDR:
+ {
+ uint32_t addr;
+
+ if (ddi_copyin((void *)data, &addr, sizeof (uint32_t), mode)) {
+ mutex_exit(&sc->gsoft_lock);
+ return (EFAULT);
+ }
+
+ pci_config_put32(sc->gsoft_pcihdl, AMD64_GART_BASE, addr);
+ amd64_enable_gart(sc, 1);
+
+ break;
+ }
+ case AMD64_FLUSH_GTLB:
+ {
+ amd64_invalidate_gtlb(sc);
+
+ break;
+ }
+ case AMD64_CONFIGURE:
+ {
+ /* reserved */
+ break;
+ }
+ case AMD64_UNCONFIG:
+ {
+ amd64_enable_gart(sc, 0);
+ pci_config_put32(sc->gsoft_pcihdl, AMD64_GART_BASE, 0x00000000);
+
+ break;
+ }
+ default:
+ mutex_exit(&sc->gsoft_lock);
+ return (ENXIO);
+
+ }
+
+ mutex_exit(&sc->gsoft_lock);
+
+ return (0);
+}
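+
+/*
+ * Example (sketch): agpgart programs the CPU GART through this
+ * kernel-only interface via its layered handle (gart_hdl here is the
+ * caller's LDI handle, and gartaddr a previously computed table base):
+ *
+ *	uint32_t gartaddr;
+ *	int rval;
+ *
+ *	(void) ldi_ioctl(gart_hdl, AMD64_SET_GART_ADDR, (intptr_t)&gartaddr,
+ *	    FKIOCTL, kcred, &rval);
+ */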
+
+/*ARGSUSED*/
+static int
+amd64_gart_open(dev_t *dev, int flag, int otyp, cred_t *cred)
+{
+ int instance;
+ amd64_gart_softstate_t *sc;
+
+ if (!(flag & FKLYR))
+ return (ENXIO);
+
+ instance = DEV2INST(*dev);
+ sc = GETSOFTC(instance);
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+amd64_gart_close(dev_t dev, int flag, int otyp, cred_t *cred)
+{
+ int instance;
+ amd64_gart_softstate_t *sc;
+
+ instance = DEV2INST(dev);
+ sc = GETSOFTC(instance);
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ return (0);
+}
+
+static struct cb_ops amd64_gart_cb_ops = {
+ amd64_gart_open, /* cb_open() */
+ amd64_gart_close, /* cb_close() */
+ nodev, /* cb_strategy() */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read() */
+ nodev, /* cb_write() */
+ amd64_gart_ioctl, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ 0, /* cb_stream */
+ D_NEW | D_MP, /* cb_flag */
+	CB_REV, /* cb_rev */
+ nodev, /* cb_aread() */
+ nodev, /* cb_awrite() */
+};
+
+/* device operations */
+static struct dev_ops amd64_gart_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ amd64_gart_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ amd64_gart_attach, /* devo_attach */
+ amd64_gart_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ &amd64_gart_cb_ops, /* devo_cb_ops */
+ 0, /* devo_bus_ops */
+ 0, /* devo_power */
+ ddi_quiesce_not_needed, /* devo_quiesce */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops,
+ "AGP AMD gart driver",
+ &amd64_gart_ops,
+};
+
+static struct modlinkage modlinkage = {
+	MODREV_1, /* ml_rev */
+ &modldrv,
+ NULL
+};
+
+
+int
+_init(void)
+{
+ int ret = DDI_SUCCESS;
+
+ ret = ddi_soft_state_init(&amd64_gart_glob_soft_handle,
+ sizeof (amd64_gart_softstate_t),
+ MAX_GART_INSTS);
+
+ if (ret)
+ return (ret);
+ if ((ret = mod_install(&modlinkage)) != 0) {
+ ddi_soft_state_fini(&amd64_gart_glob_soft_handle);
+ return (ret);
+ }
+ return (DDI_SUCCESS);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int ret;
+ if ((ret = mod_remove(&modlinkage)) == 0) {
+ ddi_soft_state_fini(&amd64_gart_glob_soft_handle);
+ }
+ return (ret);
+}
diff --git a/usr/src/uts/intel/io/agpmaster/agpmaster.c b/usr/src/uts/intel/io/agpmaster/agpmaster.c
new file mode 100644
index 0000000..7a4de1e
--- /dev/null
+++ b/usr/src/uts/intel/io/agpmaster/agpmaster.c
@@ -0,0 +1,732 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Misc module for AGP master device support
+ */
+
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/types.h>
+#include <sys/dditypes.h>
+#include <sys/sunddi.h>
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpmaster_io.h>
+
+#define PGTBL_CTL 0x2020 /* Page table control register */
+#define I8XX_FB_BAR 1
+#define I8XX_MMIO_BAR 2
+#define I8XX_PTE_OFFSET 0x10000
+#define I915_MMADR 1 /* mem-mapped registers BAR */
+#define I915_GMADR 3 /* graphics mem BAR */
+#define I915_GTTADDR 4 /* GTT BAR */
+#define I965_GTTMMADR 1 /* mem-mapped registers BAR + GTT */
+/* In 965 1MB GTTMMADR, GTT reside in the latter 512KB */
+#define I965_GTT_OFFSET 0x80000
+#define GM45_GTT_OFFSET 0x200000
+#define GTT_SIZE_MASK 0xe
+#define GTT_512KB (0 << 1)
+#define GTT_256KB (1 << 1)
+#define GTT_128KB (2 << 1)
+#define GTT_1MB (3 << 1)
+#define GTT_2MB (4 << 1)
+#define GTT_1_5MB (5 << 1)
+
+#define MMIO_BASE(x) (x)->agpm_data.agpm_gtt.gtt_mmio_base
+#define MMIO_HANDLE(x) (x)->agpm_data.agpm_gtt.gtt_mmio_handle
+#define GTT_HANDLE(x) (x)->agpm_data.agpm_gtt.gtt_handle
+/* Base address of GTT */
+#define GTT_ADDR(x) (x)->agpm_data.agpm_gtt.gtt_addr
+/* Graphics memory base address */
+#define APER_BASE(x) (x)->agpm_data.agpm_gtt.gtt_info.igd_aperbase
+
+#define AGPM_WRITE(x, off, val) \
+	ddi_put32(MMIO_HANDLE(x), (uint32_t *)(MMIO_BASE(x) + (off)), (val))
+
+#define AGPM_READ(x, off) \
+	ddi_get32(MMIO_HANDLE(x), (uint32_t *)(MMIO_BASE(x) + (off)))
+
+#ifdef DEBUG
+#define CONFIRM(value) ASSERT(value)
+#else
+#define CONFIRM(value) if (!(value)) return (EINVAL)
+#endif
+
+int agpm_debug = 0;
+#define AGPM_DEBUG(args) if (agpm_debug >= 1) cmn_err args
+
+/*
+ * Whether it is a Intel integrated graphics card
+ * Whether it is an Intel integrated graphics card
+#define IS_IGD(agpmaster) (((agpmaster)->agpm_dev_type == DEVICE_IS_I810) || \
+	((agpmaster)->agpm_dev_type == DEVICE_IS_I830))
+
+static struct modlmisc modlmisc = {
+ &mod_miscops, "AGP master interfaces"
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modlmisc, NULL
+};
+
+static ddi_device_acc_attr_t i8xx_dev_access = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+static off_t agpmaster_cap_find(ddi_acc_handle_t);
+static int detect_i8xx_device(agp_master_softc_t *);
+static int detect_agp_devcice(agp_master_softc_t *, ddi_acc_handle_t);
+static int i8xx_add_to_gtt(gtt_impl_t *, igd_gtt_seg_t);
+static void i8xx_remove_from_gtt(gtt_impl_t *, igd_gtt_seg_t);
+
+int
+_init(void)
+{
+ int err;
+
+ if ((err = mod_install(&modlinkage)) != 0)
+ return (err);
+
+ return (0);
+}
+
+int
+_fini(void)
+{
+ int err;
+
+ if ((err = mod_remove(&modlinkage)) != 0)
+ return (err);
+
+ return (0);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+/*
+ * Minor node is not removed here, since the caller (xx_attach) is
+ * responsible for removing all nodes.
+ */
+void
+agpmaster_detach(agp_master_softc_t **master_softcp)
+{
+ agp_master_softc_t *master_softc;
+
+ ASSERT(master_softcp);
+ master_softc = *master_softcp;
+
+ /* intel integrated device */
+ if (IS_IGD(master_softc) &&
+ ((MMIO_HANDLE(master_softc) != NULL) ||
+ (GTT_HANDLE(master_softc) != NULL))) {
+ /*
+ * for some chipsets, mmap handle is shared between both mmio
+ * and GTT table.
+ */
+ if ((GTT_HANDLE(master_softc) != MMIO_HANDLE(master_softc)) &&
+ (GTT_HANDLE(master_softc) != NULL))
+ ddi_regs_map_free(&GTT_HANDLE(master_softc));
+ if (MMIO_HANDLE(master_softc) != NULL)
+ ddi_regs_map_free(&MMIO_HANDLE(master_softc));
+ }
+
+	kmem_free(master_softc, sizeof (agp_master_softc_t));
+	*master_softcp = NULL;
+}
+
+/*
+ * The 965 GTT lives at a fixed offset within GTTMMADR, but the GTT size
+ * in use is encoded in PGTBL_CTL, so read it to find the actual aperture
+ * size. Aperture size = GTT table size * 1024.
+ */
+static off_t
+i965_apersize(agp_master_softc_t *agpmaster)
+{
+ off_t apersize;
+
+ apersize = AGPM_READ(agpmaster, PGTBL_CTL);
+ AGPM_DEBUG((CE_NOTE, "i965_apersize: PGTBL_CTL = %lx", apersize));
+ switch (apersize & GTT_SIZE_MASK) {
+ case GTT_2MB:
+ apersize = 2048;
+ break;
+ case GTT_1_5MB:
+ apersize = 1536;
+ break;
+ case GTT_1MB:
+ apersize = 1024;
+ break;
+ case GTT_512KB:
+ apersize = 512;
+ break;
+ case GTT_256KB:
+ apersize = 256;
+ break;
+ case GTT_128KB:
+ apersize = 128;
+ break;
+ default:
+ apersize = 0;
+ AGPM_DEBUG((CE_WARN,
+ "i965_apersize: invalid GTT size in PGTBL_CTL"));
+ }
+ return (apersize);
+}
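+
+/*
+ * Worked example: a PGTBL_CTL size field of GTT_1MB yields an aperture of
+ * 1024MB: each 4-byte GTT entry maps one 4KB page, so a 1MB (1024KB) GTT
+ * holds 256K entries covering 1GB, i.e. aperture = GTT size * 1024.
+ */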
+
+/*
+ * For Intel 3 series, we need to get GTT size from the GGMS field in GMCH
+ * Graphics Control Register. Return aperture size in MB.
+ */
+static off_t
+i3XX_apersize(ddi_acc_handle_t pci_acc_hdl)
+{
+ uint16_t value;
+ off_t apersize;
+
+ /*
+ * Get the value of configuration register MGGC "Mirror of Dev0 GMCH
+ * Graphics Control" from Internal Graphics #2 (Device2:Function0).
+ */
+ value = pci_config_get16(pci_acc_hdl, I8XX_CONF_GC);
+ AGPM_DEBUG((CE_NOTE, "i3XX_apersize: MGGC = 0x%x", value));
+	/* compute aperture size from the pre-allocated GTT size */
+ switch (value & IX33_GGMS_MASK) {
+ case IX33_GGMS_1M:
+ apersize = 1024;
+ break;
+ case IX33_GGMS_2M:
+ apersize = 2048;
+ break;
+ default:
+ apersize = 0; /* no memory pre-allocated */
+ AGPM_DEBUG((CE_WARN,
+ "i3XX_apersize: no memory allocated for GTT"));
+ }
+ AGPM_DEBUG((CE_NOTE, "i3xx_apersize: apersize = %ldM", apersize));
+ return (apersize);
+}
+
+#define CHECK_STATUS(status) \
+	if ((status) != DDI_SUCCESS) { \
+		AGPM_DEBUG((CE_WARN, \
+		    "set_gtt_mmio: regs_map_setup error")); \
+		return (-1); \
+	}
+
+/*
+ * Set gtt_addr, gtt_mmio_base, igd_apersize, igd_aperbase and igd_devid
+ * according to chipset.
+ */
+static int
+set_gtt_mmio(dev_info_t *devi, agp_master_softc_t *agpmaster,
+ ddi_acc_handle_t pci_acc_hdl)
+{
+ off_t apersize; /* size of graphics mem (MB) == GTT size (KB) */
+ uint32_t value;
+ off_t gmadr_off; /* GMADR offset in PCI config space */
+ int status;
+
+ if (IS_INTEL_X33(agpmaster->agpm_id)) {
+		/* Intel 3 series are similar to the 915/945 series */
+ status = ddi_regs_map_setup(devi, I915_GTTADDR,
+ &GTT_ADDR(agpmaster), 0, 0, &i8xx_dev_access,
+ &GTT_HANDLE(agpmaster));
+ CHECK_STATUS(status);
+
+ status = ddi_regs_map_setup(devi, I915_MMADR,
+ &MMIO_BASE(agpmaster), 0, 0, &i8xx_dev_access,
+ &MMIO_HANDLE(agpmaster));
+ CHECK_STATUS(status);
+
+ gmadr_off = I915_CONF_GMADR;
+ /* Different computing method used in getting aperture size. */
+ apersize = i3XX_apersize(pci_acc_hdl);
+ } else if (IS_INTEL_965(agpmaster->agpm_id)) {
+ status = ddi_regs_map_setup(devi, I965_GTTMMADR,
+ &MMIO_BASE(agpmaster), 0, 0, &i8xx_dev_access,
+ &MMIO_HANDLE(agpmaster));
+ CHECK_STATUS(status);
+ if ((agpmaster->agpm_id == INTEL_IGD_GM45) ||
+ IS_INTEL_G4X(agpmaster->agpm_id))
+ GTT_ADDR(agpmaster) =
+ MMIO_BASE(agpmaster) + GM45_GTT_OFFSET;
+ else
+ GTT_ADDR(agpmaster) =
+ MMIO_BASE(agpmaster) + I965_GTT_OFFSET;
+ GTT_HANDLE(agpmaster) = MMIO_HANDLE(agpmaster);
+
+ gmadr_off = I915_CONF_GMADR;
+ apersize = i965_apersize(agpmaster);
+ } else if (IS_INTEL_915(agpmaster->agpm_id)) {
+ /* I915/945 series */
+ status = ddi_regs_map_setup(devi, I915_GTTADDR,
+ &GTT_ADDR(agpmaster), 0, 0, &i8xx_dev_access,
+ &GTT_HANDLE(agpmaster));
+ CHECK_STATUS(status);
+
+ status = ddi_regs_map_setup(devi, I915_MMADR,
+ &MMIO_BASE(agpmaster), 0, 0, &i8xx_dev_access,
+ &MMIO_HANDLE(agpmaster));
+ CHECK_STATUS(status);
+
+ gmadr_off = I915_CONF_GMADR;
+ status = ddi_dev_regsize(devi, I915_GMADR, &apersize);
+ apersize = BYTES2MB(apersize);
+ } else {
+ /* I8XX series */
+ status = ddi_regs_map_setup(devi, I8XX_MMIO_BAR,
+ &MMIO_BASE(agpmaster), 0, 0, &i8xx_dev_access,
+ &MMIO_HANDLE(agpmaster));
+ CHECK_STATUS(status);
+
+ GTT_ADDR(agpmaster) = MMIO_BASE(agpmaster) + I8XX_PTE_OFFSET;
+ GTT_HANDLE(agpmaster) = MMIO_HANDLE(agpmaster);
+ gmadr_off = I8XX_CONF_GMADR;
+ status = ddi_dev_regsize(devi, I8XX_FB_BAR, &apersize);
+ apersize = BYTES2MB(apersize);
+ CHECK_STATUS(status);
+ }
+
+ /*
+ * If the reported aperture size is implausibly small (less than
+ * 4MB), the register set number used for the graphics memory
+ * range is probably wrong.
+ */
+ if (status != DDI_SUCCESS || apersize < 4) {
+ AGPM_DEBUG((CE_WARN,
+ "set_gtt_mmio: error in getting graphics memory"));
+ return (-1);
+ }
+
+ agpmaster->agpm_data.agpm_gtt.gtt_info.igd_apersize = apersize;
+
+ /* get graphics memory base address from GMADR */
+ value = pci_config_get32(pci_acc_hdl, gmadr_off);
+ APER_BASE(agpmaster) = value & GTT_BASE_MASK;
+ AGPM_DEBUG((CE_NOTE, "set_gtt_mmio: aperbase = 0x%x, apersize = %ldM, "
+ "gtt_addr = %p, mmio_base = %p", APER_BASE(agpmaster), apersize,
+ (void *)GTT_ADDR(agpmaster), (void *)MMIO_BASE(agpmaster)));
+ return (0);
+}
+
+/*
+ * Try to initialize the agp master.
+ * Returns 0 if the device is successfully initialized, with the AGP master
+ * soft state returned in *master_softcp if needed.
+ * Otherwise returns -1 and sets *master_softcp to NULL.
+ */
+int
+agpmaster_attach(dev_info_t *devi, agp_master_softc_t **master_softcp,
+ ddi_acc_handle_t pci_acc_hdl, minor_t minor)
+{
+ int instance;
+ int status;
+ agp_master_softc_t *agpmaster;
+ char buf[80];
+
+ ASSERT(pci_acc_hdl);
+ *master_softcp = NULL;
+ agpmaster = (agp_master_softc_t *)
+ kmem_zalloc(sizeof (agp_master_softc_t), KM_SLEEP);
+
+ agpmaster->agpm_id =
+ pci_config_get32(pci_acc_hdl, PCI_CONF_VENID);
+ agpmaster->agpm_acc_hdl = pci_acc_hdl;
+
+ if (!detect_i8xx_device(agpmaster)) {
+ /* Intel 8XX, 915, 945 and 965 series */
+ if (set_gtt_mmio(devi, agpmaster, pci_acc_hdl) != 0)
+ goto fail;
+ } else if (detect_agp_devcice(agpmaster, pci_acc_hdl)) {
+ /* not an IGD or AGP device; e.g. an AMD64 gart */
+ AGPM_DEBUG((CE_WARN,
+ "agpmaster_attach: neither an IGD nor an AGP device exists"));
+ agpmaster_detach(&agpmaster);
+ return (0);
+ }
+
+ agpmaster->agpm_data.agpm_gtt.gtt_info.igd_devid =
+ agpmaster->agpm_id;
+
+ /* create minor node for IGD or AGP device */
+ instance = ddi_get_instance(devi);
+
+ (void) sprintf(buf, "%s%d", AGPMASTER_NAME, instance);
+ status = ddi_create_minor_node(devi, buf, S_IFCHR, minor,
+ DDI_NT_AGP_MASTER, 0);
+
+ if (status != DDI_SUCCESS) {
+ AGPM_DEBUG((CE_WARN,
+ "agpmaster_attach: create agpmaster node failed"));
+ goto fail;
+ }
+
+ *master_softcp = agpmaster;
+ return (0);
+fail:
+ agpmaster_detach(&agpmaster);
+ return (-1);
+}
+
+/*
+ * Currently this handles only the ioctl requests issued to the agp master
+ * device by the layered driver (agpgart).
+ */
+/*ARGSUSED*/
+int
+agpmaster_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *cred,
+ int *rval, agp_master_softc_t *softc)
+{
+ uint32_t base;
+ uint32_t addr;
+ igd_gtt_seg_t seg;
+ agp_info_t info;
+ uint32_t value;
+ off_t cap;
+ uint32_t command;
+ static char kernel_only[] =
+ "agpmaster_ioctl: %s is a kernel only ioctl";
+
+ CONFIRM(softc);
+
+ switch (cmd) {
+ case DEVICE_DETECT:
+ if (!(mode & FKIOCTL)) {
+ AGPM_DEBUG((CE_CONT, kernel_only, "DEVICE_DETECT"));
+ return (ENXIO);
+ }
+
+ if (ddi_copyout(&softc->agpm_dev_type,
+ (void *)data, sizeof (int), mode))
+ return (EFAULT);
+ break;
+ case AGP_MASTER_SETCMD:
+ if (!(mode & FKIOCTL)) {
+ AGPM_DEBUG((CE_CONT, kernel_only, "AGP_MASTER_SETCMD"));
+ return (ENXIO);
+ }
+
+ CONFIRM(softc->agpm_dev_type == DEVICE_IS_AGP);
+ CONFIRM(softc->agpm_data.agpm_acaptr);
+
+ if (ddi_copyin((void *)data, &command,
+ sizeof (uint32_t), mode))
+ return (EFAULT);
+
+ pci_config_put32(softc->agpm_acc_hdl,
+ softc->agpm_data.agpm_acaptr + AGP_CONF_COMMAND,
+ command);
+ break;
+ case AGP_MASTER_GETINFO:
+ if (!(mode & FKIOCTL)) {
+ AGPM_DEBUG((CE_CONT, kernel_only,
+ "AGP_MASTER_GETINFO"));
+ return (ENXIO);
+ }
+
+ CONFIRM(softc->agpm_dev_type == DEVICE_IS_AGP);
+ CONFIRM(softc->agpm_data.agpm_acaptr);
+
+ cap = softc->agpm_data.agpm_acaptr;
+ value = pci_config_get32(softc->agpm_acc_hdl, cap);
+ info.agpi_version.agpv_major = (uint16_t)((value >> 20) & 0xf);
+ info.agpi_version.agpv_minor = (uint16_t)((value >> 16) & 0xf);
+ info.agpi_devid = softc->agpm_id;
+ info.agpi_mode = pci_config_get32(
+ softc->agpm_acc_hdl, cap + AGP_CONF_STATUS);
+
+ if (ddi_copyout(&info, (void *)data,
+ sizeof (agp_info_t), mode))
+ return (EFAULT);
+ break;
+ case I810_SET_GTT_BASE:
+ if (!(mode & FKIOCTL)) {
+ AGPM_DEBUG((CE_CONT, kernel_only, "I810_SET_GTT_ADDR"));
+ return (ENXIO);
+ }
+
+ CONFIRM(softc->agpm_dev_type == DEVICE_IS_I810);
+
+ if (ddi_copyin((void *)data, &base, sizeof (uint32_t), mode))
+ return (EFAULT);
+
+ /* enables page table */
+ addr = (base & GTT_BASE_MASK) | GTT_TABLE_VALID;
+
+ AGPM_WRITE(softc, PGTBL_CTL, addr);
+ break;
+ case I8XX_GET_INFO:
+ if (!(mode & FKIOCTL)) {
+ AGPM_DEBUG((CE_CONT, kernel_only, "I8XX_GET_INFO"));
+ return (ENXIO);
+ }
+
+ CONFIRM(IS_IGD(softc));
+
+ if (ddi_copyout(&softc->agpm_data.agpm_gtt.gtt_info,
+ (void *)data, sizeof (igd_info_t), mode))
+ return (EFAULT);
+ break;
+ case I8XX_ADD2GTT:
+ if (!(mode & FKIOCTL)) {
+ AGPM_DEBUG((CE_CONT, kernel_only, "I8XX_ADD2GTT"));
+ return (ENXIO);
+ }
+
+ CONFIRM(IS_IGD(softc));
+
+ if (ddi_copyin((void *)data, &seg,
+ sizeof (igd_gtt_seg_t), mode))
+ return (EFAULT);
+
+ if (i8xx_add_to_gtt(&softc->agpm_data.agpm_gtt, seg))
+ return (EINVAL);
+ break;
+ case I8XX_REM_GTT:
+ if (!(mode & FKIOCTL)) {
+ AGPM_DEBUG((CE_CONT, kernel_only, "I8XX_REM_GTT"));
+ return (ENXIO);
+ }
+
+ CONFIRM(IS_IGD(softc));
+
+ if (ddi_copyin((void *)data, &seg,
+ sizeof (igd_gtt_seg_t), mode))
+ return (EFAULT);
+
+ i8xx_remove_from_gtt(&softc->agpm_data.agpm_gtt, seg);
+ break;
+ case I8XX_UNCONFIG:
+ if (!(mode & FKIOCTL)) {
+ AGPM_DEBUG((CE_CONT, kernel_only, "I8XX_UNCONFIG"));
+ return (ENXIO);
+ }
+
+ CONFIRM(IS_IGD(softc));
+
+ if (softc->agpm_dev_type == DEVICE_IS_I810)
+ AGPM_WRITE(softc, PGTBL_CTL, 0);
+ /*
+ * may need to clear all gtt entries here for i830 series,
+ * but may not be necessary
+ */
+ break;
+ }
+ return (0);
+}
+
+/*
+ * If the AGP capability pointer is found, its non-zero offset is returned.
+ * Otherwise 0 is returned.
+ */
+ */
+static off_t
+agpmaster_cap_find(ddi_acc_handle_t acc_handle)
+{
+ off_t nextcap;
+ uint32_t ncapid;
+ uint8_t value;
+
+ /* check whether this device supports the capability pointer */
+ value = (uint8_t)(pci_config_get16(acc_handle, PCI_CONF_STAT)
+ & PCI_CONF_CAP_MASK);
+
+ if (!value)
+ return (0);
+ /* get the offset of the first capability pointer from CAPPTR */
+ nextcap = (off_t)(pci_config_get8(acc_handle, AGP_CONF_CAPPTR));
+
+ /* check AGP capability from the first capability pointer */
+ while (nextcap) {
+ ncapid = pci_config_get32(acc_handle, nextcap);
+ if ((ncapid & PCI_CONF_CAPID_MASK)
+ == AGP_CAP_ID) /* find AGP cap */
+ break;
+
+ nextcap = (off_t)((ncapid & PCI_CONF_NCAPID_MASK) >> 8);
+ }
+
+ return (nextcap);
+}
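+
+/*
+ * Illustrative walk (offsets hypothetical, assuming the standard PCI layout
+ * where CAPPTR lives at config offset 0x34 and the AGP capability ID is
+ * 0x02): if CAPPTR holds 0x50, the dword read at 0x50 carries the
+ * capability ID in bits 7:0 and the next-capability pointer in bits 15:8,
+ * so the loop hops 0x50 -> 0x60 -> ... until it finds the AGP ID or a
+ * NULL (0) pointer.
+ */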
+
+/*
+ * If i8xx device is successfully detected, 0 is returned.
+ * Otherwise -1 is returned.
+ */
+static int
+detect_i8xx_device(agp_master_softc_t *master_softc)
+{
+ switch (master_softc->agpm_id) {
+ case INTEL_IGD_810:
+ case INTEL_IGD_810DC:
+ case INTEL_IGD_810E:
+ case INTEL_IGD_815:
+ master_softc->agpm_dev_type = DEVICE_IS_I810;
+ break;
+ case INTEL_IGD_830M:
+ case INTEL_IGD_845G:
+ case INTEL_IGD_855GM:
+ case INTEL_IGD_865G:
+ case INTEL_IGD_915:
+ case INTEL_IGD_915GM:
+ case INTEL_IGD_945:
+ case INTEL_IGD_945GM:
+ case INTEL_IGD_945GME:
+ case INTEL_IGD_946GZ:
+ case INTEL_IGD_965G1:
+ case INTEL_IGD_965G2:
+ case INTEL_IGD_965GM:
+ case INTEL_IGD_965GME:
+ case INTEL_IGD_965Q:
+ case INTEL_IGD_Q35:
+ case INTEL_IGD_G33:
+ case INTEL_IGD_Q33:
+ case INTEL_IGD_GM45:
+ case INTEL_IGD_EL:
+ case INTEL_IGD_Q45:
+ case INTEL_IGD_G45:
+ case INTEL_IGD_G41:
+ case INTEL_IGD_IGDNG_D:
+ case INTEL_IGD_IGDNG_M:
+ case INTEL_IGD_B43:
+ master_softc->agpm_dev_type = DEVICE_IS_I830;
+ break;
+ default: /* unknown id */
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * If agp master is successfully detected, 0 is returned.
+ * Otherwise -1 is returned.
+ */
+static int
+detect_agp_devcice(agp_master_softc_t *master_softc,
+ ddi_acc_handle_t acc_handle)
+{
+ off_t cap;
+
+ cap = agpmaster_cap_find(acc_handle);
+ if (cap) {
+ master_softc->agpm_dev_type = DEVICE_IS_AGP;
+ master_softc->agpm_data.agpm_acaptr = cap;
+ return (0);
+ } else {
+ return (-1);
+ }
+}
+
+/*
+ * Refer to the GART and GTT entry format table in agpdefs.h for the
+ * Intel GTT entry format.
+ */
+static int
+phys2entry(uint32_t type, uint32_t physaddr, uint32_t *entry)
+{
+ uint32_t value;
+
+ switch (type) {
+ case AGP_PHYSICAL:
+ case AGP_NORMAL:
+ value = (physaddr & GTT_PTE_MASK) | GTT_PTE_VALID;
+ break;
+ default:
+ return (-1);
+ }
+
+ *entry = value;
+
+ return (0);
+}
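+
+/*
+ * Example (illustrative, assuming GTT_PTE_VALID is bit 0): a page-aligned
+ * physaddr of 0x12345000 with type AGP_NORMAL produces the entry
+ * 0x12345001, i.e. the masked physical address with the valid bit set.
+ */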
+
+static int
+i8xx_add_to_gtt(gtt_impl_t *gtt, igd_gtt_seg_t seg)
+{
+ int i;
+ uint32_t *paddr;
+ uint32_t entry;
+ uint32_t maxpages;
+
+ maxpages = gtt->gtt_info.igd_apersize;
+ maxpages = GTT_MB_TO_PAGES(maxpages);
+
+ /* make sure the segment does not run past the end of the GTT */
+ if ((seg.igs_pgstart + seg.igs_npage) > maxpages)
+ return (-1);
+
+ paddr = seg.igs_phyaddr;
+ for (i = seg.igs_pgstart; i < (seg.igs_pgstart + seg.igs_npage);
+ i++, paddr++) {
+ if (phys2entry(seg.igs_type, *paddr, &entry))
+ return (-1);
+ ddi_put32(gtt->gtt_handle,
+ (uint32_t *)(gtt->gtt_addr + i * sizeof (uint32_t)),
+ entry);
+ }
+
+ return (0);
+}
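+
+/*
+ * Sizing sketch (illustrative): GTT_MB_TO_PAGES presumably converts the
+ * aperture size in MB to 4KB pages, so a 128MB aperture gives
+ * maxpages = 128 * 256 = 32768; a segment whose igs_pgstart + igs_npage
+ * exceeds that bound is rejected by the check above.
+ */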
+
+static void
+i8xx_remove_from_gtt(gtt_impl_t *gtt, igd_gtt_seg_t seg)
+{
+ int i;
+ uint32_t maxpages;
+
+ maxpages = gtt->gtt_info.igd_apersize;
+ maxpages = GTT_MB_TO_PAGES(maxpages);
+
+ /* make sure the segment does not run past the end of the GTT */
+ if ((seg.igs_pgstart + seg.igs_npage) > maxpages)
+ return;
+
+ for (i = seg.igs_pgstart; i < (seg.igs_pgstart + seg.igs_npage); i++) {
+ ddi_put32(gtt->gtt_handle,
+ (uint32_t *)(gtt->gtt_addr + i * sizeof (uint32_t)), 0);
+ }
+}
diff --git a/usr/src/uts/intel/io/drm/drm_pciids.h b/usr/src/uts/intel/io/drm/drm_pciids.h
new file mode 100644
index 0000000..18176ed
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/drm_pciids.h
@@ -0,0 +1,219 @@
+/*
+ * This file is auto-generated from the drm_pciids.txt in the DRM CVS
+ * Please contact dri-devel@lists.sf.net to add new cards to this list
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _DRM_PCIIDS_H_
+#define _DRM_PCIIDS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define radeon_PCI_IDS\
+ {0x1002, 0x4136, CHIP_RS100|RADEON_IS_IGP, \
+ "ATI Radeon RS100 IGP 320M"}, \
+ {0x1002, 0x4137, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS200 IGP"}, \
+ {0x1002, 0x4144, CHIP_R300, "ATI Radeon AD 9500 Pro"}, \
+ {0x1002, 0x4145, CHIP_R300, "ATI Radeon AE 9700 Pro"}, \
+ {0x1002, 0x4146, CHIP_R300, "ATI Radeon AF 9700 Pro"}, \
+ {0x1002, 0x4147, CHIP_R300, "ATI FireGL AG Z1/X1"}, \
+ {0x1002, 0x4150, CHIP_RV350, "ATI Radeon AP 9600"}, \
+ {0x1002, 0x4151, CHIP_RV350, "ATI Radeon AQ 9600"}, \
+ {0x1002, 0x4152, CHIP_RV350, "ATI Radeon AR 9600"}, \
+ {0x1002, 0x4153, CHIP_RV350, "ATI Radeon AS 9600 AS"}, \
+ {0x1002, 0x4154, CHIP_RV350, "ATI FireGL AT T2"}, \
+ {0x1002, 0x4156, CHIP_RV350, "ATI FireGL AV T2"}, \
+ {0x1002, 0x4237, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS250 IGP"}, \
+ {0x1002, 0x4242, CHIP_R200, "ATI Radeon BB R200 AIW 8500DV"}, \
+ {0x1002, 0x4243, CHIP_R200, "ATI Radeon BC R200"}, \
+ {0x1002, 0x4336, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY, \
+ "ATI Radeon RS100 Mobility U1"}, \
+ {0x1002, 0x4337, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, \
+ "ATI Radeon RS200 Mobility IGP 340M"}, \
+ {0x1002, 0x4437, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, \
+ "ATI Radeon RS250 Mobility IGP"}, \
+ {0x1002, 0x4966, CHIP_RV250, "ATI Radeon If R250 9000"}, \
+ {0x1002, 0x4967, CHIP_RV250, "ATI Radeon Ig R250 9000"}, \
+ {0x1002, 0x4A49, CHIP_R420, "ATI Radeon JI R420 X800PRO"}, \
+ {0x1002, 0x4A4B, CHIP_R420, "ATI Radeon JK R420 X800 XT"}, \
+ {0x1002, 0x4C57, CHIP_RV200|RADEON_IS_MOBILITY, \
+ "ATI Radeon LW RV200 Mobility 7500 M7"}, \
+ {0x1002, 0x4C58, CHIP_RV200|RADEON_IS_MOBILITY, \
+ "ATI Radeon LX RV200 Mobility FireGL 7800 M7"}, \
+ {0x1002, 0x4C59, CHIP_RV100|RADEON_IS_MOBILITY, \
+ "ATI Radeon LY RV100 Mobility M6"}, \
+ {0x1002, 0x4C5A, CHIP_RV100|RADEON_IS_MOBILITY, \
+ "ATI Radeon LZ RV100 Mobility M6"}, \
+ {0x1002, 0x4C64, CHIP_RV250|RADEON_IS_MOBILITY, \
+ "ATI Radeon Ld RV250 Mobility 9000 M9"}, \
+ {0x1002, 0x4C66, CHIP_RV250|RADEON_IS_MOBILITY, \
+ "ATI Radeon Lf R250 Mobility 9000 M9"}, \
+ {0x1002, 0x4C67, CHIP_RV250|RADEON_IS_MOBILITY, \
+ "ATI Radeon Lg R250 Mobility 9000 M9"}, \
+ {0x1002, 0x4E44, CHIP_R300, "ATI Radeon ND R300 9700 Pro"}, \
+ {0x1002, 0x4E45, CHIP_R300, "ATI Radeon NE R300 9500 Pro"}, \
+ {0x1002, 0x4E46, CHIP_RV350, "ATI Radeon NF RV350 9600"}, \
+ {0x1002, 0x4E47, CHIP_R300, "ATI Radeon NG R300 FireGL X1"}, \
+ {0x1002, 0x4E48, CHIP_R350, "ATI Radeon NH R350 9800 Pro"}, \
+ {0x1002, 0x4E49, CHIP_R350, "ATI Radeon NI R350 9800"}, \
+ {0x1002, 0x4E4A, CHIP_RV350, "ATI Radeon NJ RV350 9800 XT"}, \
+ {0x1002, 0x4E4B, CHIP_R350, "ATI Radeon NK R350 FireGL X2"}, \
+ {0x1002, 0x4E50, CHIP_RV350|RADEON_IS_MOBILITY, \
+ "ATI Radeon RV300 Mobility 9600 M10"}, \
+ {0x1002, 0x4E51, CHIP_RV350|RADEON_IS_MOBILITY, \
+ "ATI Radeon RV350 Mobility 9600 M10 NQ"}, \
+ {0x1002, 0x4E54, CHIP_RV350|RADEON_IS_MOBILITY, \
+ "ATI Radeon FireGL T2 128"}, \
+ {0x1002, 0x4E56, CHIP_RV350|RADEON_IS_MOBILITY, \
+ "ATI Radeon FireGL Mobility T2e"}, \
+ {0x1002, 0x5144, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QD R100"}, \
+ {0x1002, 0x5145, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QE R100"}, \
+ {0x1002, 0x5146, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QF R100"}, \
+ {0x1002, 0x5147, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QG R100"}, \
+ {0x1002, 0x5148, CHIP_R200, "ATI Radeon QH R200 8500"}, \
+ {0x1002, 0x5149, CHIP_R200, "ATI Radeon QI R200"}, \
+ {0x1002, 0x514A, CHIP_R200, "ATI Radeon QJ R200"}, \
+ {0x1002, 0x514B, CHIP_R200, "ATI Radeon QK R200"}, \
+ {0x1002, 0x514C, CHIP_R200, "ATI Radeon QL R200 8500 LE"}, \
+ {0x1002, 0x514D, CHIP_R200, "ATI Radeon QM R200 9100"}, \
+ {0x1002, 0x514E, CHIP_R200, "ATI Radeon QN R200 8500 LE"}, \
+ {0x1002, 0x514F, CHIP_R200, "ATI Radeon QO R200 8500 LE"}, \
+ {0x1002, 0x5157, CHIP_RV200, "ATI Radeon QW RV200 7500"}, \
+ {0x1002, 0x5158, CHIP_RV200, "ATI Radeon QX RV200 7500"}, \
+ {0x1002, 0x5159, CHIP_RV100, "ATI Radeon QY RV100 7000/VE"}, \
+ {0x1002, 0x515A, CHIP_RV100, "ATI Radeon QZ RV100 7000/VE"}, \
+ {0x1002, 0x515E, CHIP_RV100, "ATI ES1000 RN50"}, \
+ {0x1002, 0x5168, CHIP_R200, "ATI Radeon Qh R200"}, \
+ {0x1002, 0x5169, CHIP_R200, "ATI Radeon Qi R200"}, \
+ {0x1002, 0x516A, CHIP_R200, "ATI Radeon Qj R200"}, \
+ {0x1002, 0x516B, CHIP_R200, "ATI Radeon Qk R200"}, \
+ {0x1002, 0x516C, CHIP_R200, "ATI Radeon Ql R200"}, \
+ {0x1002, 0x5460, CHIP_RV350, "ATI Radeon X300"}, \
+ {0x1002, 0x554F, CHIP_R350, "ATI Radeon X800"}, \
+ {0x1002, 0x5653, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, \
+ "ATI Radeon Mobility X700 M26"}, \
+ {0x1002, 0x5834, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 IGP"}, \
+ {0x1002, 0x5835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY, \
+ "ATI Radeon RS300 Mobility IGP"}, \
+ {0x1002, 0x5836, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 IGP"}, \
+ {0x1002, 0x5837, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 IGP"}, \
+ {0x1002, 0x5960, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5961, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
+ {0x1002, 0x5962, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5963, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5964, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
+ {0x1002, 0x5968, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5969, CHIP_RV100, "ATI ES1000 RN50"}, \
+ {0x1002, 0x596A, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x596B, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5b60, CHIP_RV350, "ATI Radeon RV370 X300SE"}, \
+ {0x1002, 0x5c61, CHIP_RV280|RADEON_IS_MOBILITY, \
+ "ATI Radeon RV280 Mobility"}, \
+ {0x1002, 0x5c62, CHIP_RV280, "ATI Radeon RV280"}, \
+ {0x1002, 0x5c63, CHIP_RV280|RADEON_IS_MOBILITY, \
+ "ATI Radeon RV280 Mobility"}, \
+ {0x1002, 0x5c64, CHIP_RV280, "ATI Radeon RV280"}, \
+ {0x1002, 0x5d4d, CHIP_R350, "ATI Radeon R480"}, \
+ {0, 0, 0, NULL}
+
+#define r128_PCI_IDS\
+ {0x1002, 0x4c45, 0, "ATI Rage 128 Mobility LE (PCI)"}, \
+ {0x1002, 0x4c46, 0, "ATI Rage 128 Mobility LF (AGP)"}, \
+ {0x1002, 0x4d46, 0, "ATI Rage 128 Mobility MF (AGP)"}, \
+ {0x1002, 0x4d4c, 0, "ATI Rage 128 Mobility ML (AGP)"}, \
+ {0x1002, 0x5041, 0, "ATI Rage 128 Pro PA (PCI)"}, \
+ {0x1002, 0x5042, 0, "ATI Rage 128 Pro PB (AGP)"}, \
+ {0x1002, 0x5043, 0, "ATI Rage 128 Pro PC (AGP)"}, \
+ {0x1002, 0x5044, 0, "ATI Rage 128 Pro PD (PCI)"}, \
+ {0x1002, 0x5045, 0, "ATI Rage 128 Pro PE (AGP)"}, \
+ {0x1002, 0x5046, 0, "ATI Rage 128 Pro PF (AGP)"}, \
+ {0x1002, 0x5047, 0, "ATI Rage 128 Pro PG (PCI)"}, \
+ {0x1002, 0x5048, 0, "ATI Rage 128 Pro PH (AGP)"}, \
+ {0x1002, 0x5049, 0, "ATI Rage 128 Pro PI (AGP)"}, \
+ {0x1002, 0x504A, 0, "ATI Rage 128 Pro PJ (PCI)"}, \
+ {0x1002, 0x504B, 0, "ATI Rage 128 Pro PK (AGP)"}, \
+ {0x1002, 0x504C, 0, "ATI Rage 128 Pro PL (AGP)"}, \
+ {0x1002, 0x504D, 0, "ATI Rage 128 Pro PM (PCI)"}, \
+ {0x1002, 0x504E, 0, "ATI Rage 128 Pro PN (AGP)"}, \
+ {0x1002, 0x504F, 0, "ATI Rage 128 Pro PO (AGP)"}, \
+ {0x1002, 0x5050, 0, "ATI Rage 128 Pro PP (PCI)"}, \
+ {0x1002, 0x5051, 0, "ATI Rage 128 Pro PQ (AGP)"}, \
+ {0x1002, 0x5052, 0, "ATI Rage 128 Pro PR (PCI)"}, \
+ {0x1002, 0x5053, 0, "ATI Rage 128 Pro PS (PCI)"}, \
+ {0x1002, 0x5054, 0, "ATI Rage 128 Pro PT (AGP)"}, \
+ {0x1002, 0x5055, 0, "ATI Rage 128 Pro PU (AGP)"}, \
+ {0x1002, 0x5056, 0, "ATI Rage 128 Pro PV (PCI)"}, \
+ {0x1002, 0x5057, 0, "ATI Rage 128 Pro PW (AGP)"}, \
+ {0x1002, 0x5058, 0, "ATI Rage 128 Pro PX (AGP)"}, \
+ {0x1002, 0x5245, 0, "ATI Rage 128 RE (PCI)"}, \
+ {0x1002, 0x5246, 0, "ATI Rage 128 RF (AGP)"}, \
+ {0x1002, 0x5247, 0, "ATI Rage 128 RG (AGP)"}, \
+ {0x1002, 0x524b, 0, "ATI Rage 128 RK (PCI)"}, \
+ {0x1002, 0x524c, 0, "ATI Rage 128 RL (AGP)"}, \
+ {0x1002, 0x534d, 0, "ATI Rage 128 SM (AGP)"}, \
+ {0x1002, 0x5446, 0, "ATI Rage 128 Pro Ultra TF (AGP)"}, \
+ {0x1002, 0x544C, 0, "ATI Rage 128 Pro Ultra TL (AGP)"}, \
+ {0x1002, 0x5452, 0, "ATI Rage 128 Pro Ultra TR (AGP)"}, \
+ {0, 0, 0, NULL}
+
+#define mach64_PCI_IDS\
+ {0x1002, 0x4749, 0, "3D Rage Pro"}, \
+ {0x1002, 0x4750, 0, "3D Rage Pro 215GP"}, \
+ {0x1002, 0x4751, 0, "3D Rage Pro 215GQ"}, \
+ {0x1002, 0x4742, 0, "3D Rage Pro AGP 1X/2X"}, \
+ {0x1002, 0x4744, 0, "3D Rage Pro AGP 1X"}, \
+ {0x1002, 0x4c49, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c50, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c51, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c42, 0, "3D Rage LT Pro AGP-133"}, \
+ {0x1002, 0x4c44, 0, "3D Rage LT Pro AGP-66"}, \
+ {0x1002, 0x474c, 0, "Rage XC"}, \
+ {0x1002, 0x474f, 0, "Rage XL"}, \
+ {0x1002, 0x4752, 0, "Rage XL"}, \
+ {0x1002, 0x4753, 0, "Rage XC"}, \
+ {0x1002, 0x474d, 0, "Rage XL AGP 2X"}, \
+ {0x1002, 0x474e, 0, "Rage XC AGP"}, \
+ {0x1002, 0x4c52, 0, "Rage Mobility P/M"}, \
+ {0x1002, 0x4c53, 0, "Rage Mobility L"}, \
+ {0x1002, 0x4c4d, 0, "Rage Mobility P/M AGP 2X"}, \
+ {0x1002, 0x4c4e, 0, "Rage Mobility L AGP 2X"}, \
+ {0, 0, 0, NULL}
+
+#define i915_PCI_IDS\
+ {0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
+ {0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
+ {0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
+ {0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
+ {0x8086, 0x2592, CHIP_I9XX|CHIP_I915, "Intel i915GM"}, \
+ {0x8086, 0x2772, CHIP_I9XX|CHIP_I915, "Intel i945G"}, \
+ {0x8086, 0x27A2, CHIP_I9XX|CHIP_I915, "Intel i945GM"}, \
+ {0x8086, 0x27AE, CHIP_I9XX|CHIP_I915, "Intel i945GME"}, \
+ {0x8086, 0x2972, CHIP_I9XX|CHIP_I965, "Intel i946GZ"}, \
+ {0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+ {0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
+ {0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+ {0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
+ {0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
+ {0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
+ {0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
+ {0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
+ {0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Intel GM45"}, \
+ {0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel EL"}, \
+ {0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45"}, \
+ {0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45"}, \
+ {0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \
+ {0x8086, 0x42, CHIP_I9XX|CHIP_I965, "Intel IGDNG_D"}, \
+ {0x8086, 0x46, CHIP_I9XX|CHIP_I965, "Intel IGDNG_M"}, \
+ {0x8086, 0x2E42, CHIP_I9XX|CHIP_I965, "Intel B43"}, \
+ {0, 0, 0, NULL}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DRM_PCIIDS_H_ */
diff --git a/usr/src/uts/intel/io/drm/i915_dma.c b/usr/src/uts/intel/io/drm/i915_dma.c
new file mode 100644
index 0000000..48ff692
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_dma.c
@@ -0,0 +1,1146 @@
+/* BEGIN CSTYLED */
+
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+
+/* Really want an OS-independent resettable timer. Would like to have
+ * this loop run for (eg) 3 sec, but have the timer reset every time
+ * the head pointer changes, so that EBUSY only happens if the ring
+ * actually stalls for (eg) 3 seconds.
+ */
+/*ARGSUSED*/
+int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+ u32 last_acthd = I915_READ(acthd_reg);
+ u32 acthd;
+ int i;
+
+ for (i = 0; i < 100000; i++) {
+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ acthd = I915_READ(acthd_reg);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->Size;
+ if (ring->space >= n)
+ return 0;
+
+ if (ring->head != last_head)
+ i = 0;
+
+ if (acthd != last_acthd)
+ i = 0;
+
+ last_head = ring->head;
+ last_acthd = acthd;
+ DRM_UDELAY(10);
+ }
+
+ return (EBUSY);
+}
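+
+/*
+ * Ring-space arithmetic, illustrated with hypothetical values: for a 64KB
+ * ring (Size = 0x10000) with head = 0x100 and tail = 0x200,
+ * space = 0x100 - (0x200 + 8) is negative, so it wraps to
+ * 0x100 - 0x208 + 0x10000 = 0xfef8 bytes free. The loop above restarts its
+ * iteration budget whenever the head or ACTHD register moves.
+ */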
+
+int i915_init_hardware_status(drm_device_t *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_dma_handle_t *dmah;
+
+ /* Program Hardware Status Page */
+ dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff, 1);
+
+ if (!dmah) {
+ DRM_ERROR("Can not allocate hardware status page\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->status_page_dmah = dmah;
+ dev_priv->hw_status_page = (void *)dmah->vaddr;
+ dev_priv->dma_status_page = dmah->paddr;
+
+ (void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ (void) I915_READ(HWS_PGA);
+
+ DRM_DEBUG("Enabled hardware status page add 0x%lx read GEM HWS 0x%x\n",dev_priv->hw_status_page, READ_HWSP(dev_priv, 0x20));
+ return 0;
+}
+
+void i915_free_hardware_status(drm_device_t *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!I915_NEED_GFX_HWS(dev)) {
+ if (dev_priv->status_page_dmah) {
+ DRM_DEBUG("free status_page_dmal %x", dev_priv->status_page_dmah);
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
+ } else {
+ if (dev_priv->status_gfx_addr) {
+ DRM_DEBUG("free status_gfx_addr %x", dev_priv->status_gfx_addr);
+ dev_priv->status_gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
+ }
+}
+
+void i915_kernel_lost_context(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+
+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->Size;
+}
+
+static int i915_dma_cleanup(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv =
+ (drm_i915_private_t *) dev->dev_private;
+
+ /* Make sure interrupts are disabled here because the uninstall ioctl
+ * may not have been called from userspace and after dev_private
+ * is freed, it's too late.
+ */
+ if (dev->irq_enabled)
+ (void) drm_irq_uninstall(dev);
+
+ if (dev_priv->ring.virtual_start) {
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+ dev_priv->ring.virtual_start = 0;
+ dev_priv->ring.map.handle = 0;
+ dev_priv->ring.map.size = 0;
+ }
+
+#ifdef I915_HAVE_GEM
+ if (I915_NEED_GFX_HWS(dev))
+#endif
+ i915_free_hardware_status(dev);
+
+ dev_priv->sarea = NULL;
+ dev_priv->sarea_priv = NULL;
+
+ return 0;
+}
+
+static int i915_initialize(drm_device_t * dev,
+ drm_i915_init_t * init)
+{
+ drm_i915_private_t *dev_priv =
+ (drm_i915_private_t *)dev->dev_private;
+
+ DRM_GETSAREA();
+ if (!dev_priv->sarea) {
+ DRM_ERROR("can not find sarea!\n");
+ dev->dev_private = (void *)dev_priv;
+ (void) i915_dma_cleanup(dev);
+ return (EINVAL);
+ }
+
+ dev_priv->sarea_priv = (drm_i915_sarea_t *)(uintptr_t)
+ ((u8 *) dev_priv->sarea->handle +
+ init->sarea_priv_offset);
+
+ if (init->ring_size != 0) {
+ if (dev_priv->ring.ring_obj != NULL) {
+ (void) i915_dma_cleanup(dev);
+ DRM_ERROR("Client tried to initialize ringbuffer in "
+ "GEM mode\n");
+ return (EINVAL);
+ }
+
+ dev_priv->ring.Size = init->ring_size;
+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+ dev_priv->ring.map.offset = (u_offset_t)init->ring_start;
+ dev_priv->ring.map.size = init->ring_size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->ring.map, dev);
+
+ if (dev_priv->ring.map.handle == NULL) {
+ (void) i915_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return (ENOMEM);
+ }
+ }
+
+ dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.dev_addr;
+ dev_priv->cpp = init->cpp;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->front_offset = init->front_offset;
+ dev_priv->current_page = 0;
+ dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+ dev_priv->allow_batchbuffer = 1;
+ return 0;
+}
+
+static int i915_dma_resume(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!dev_priv->sarea) {
+ DRM_ERROR("can not find sarea!\n");
+ return (EINVAL);
+ }
+
+ if (dev_priv->ring.map.handle == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return (ENOMEM);
+ }
+
+ /* Program Hardware Status Page */
+ if (!dev_priv->hw_status_page) {
+ DRM_ERROR("Can not find hardware status page\n");
+ return (EINVAL);
+ }
+ DRM_DEBUG("i915_dma_resume hw status page @ %p\n", dev_priv->hw_status_page);
+
+ if (!I915_NEED_GFX_HWS(dev))
+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ else
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ DRM_DEBUG("Enabled hardware status page\n");
+
+ return 0;
+}
+
+/*ARGSUSED*/
+static int i915_dma_init(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_init_t init;
+ int retcode = 0;
+
+ DRM_COPYFROM_WITH_RETURN(&init, (drm_i915_init_t *)data, sizeof(init));
+
+ switch (init.func) {
+ case I915_INIT_DMA:
+ retcode = i915_initialize(dev, &init);
+ break;
+ case I915_CLEANUP_DMA:
+ retcode = i915_dma_cleanup(dev);
+ break;
+ case I915_RESUME_DMA:
+ retcode = i915_dma_resume(dev);
+ break;
+ default:
+ retcode = EINVAL;
+ break;
+ }
+
+ return retcode;
+}
+
+/* Implement basically the same security restrictions as hardware does
+ * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
+ *
+ * Most of the calculations below involve calculating the size of a
+ * particular instruction. It's important to get the size right as
+ * that tells us where the next instruction to check is. Any illegal
+ * instruction detected will be given a size of zero, which is a
+ * signal to abort the rest of the buffer.
+ */
+static int do_validate_cmd(int cmd)
+{
+ switch (((cmd >> 29) & 0x7)) {
+ case 0x0:
+ switch ((cmd >> 23) & 0x3f) {
+ case 0x0:
+ return 1; /* MI_NOOP */
+ case 0x4:
+ return 1; /* MI_FLUSH */
+ default:
+ return 0; /* disallow everything else */
+ }
+#ifndef __SUNPRO_C
+ break;
+#endif
+ case 0x1:
+ return 0; /* reserved */
+ case 0x2:
+ return (cmd & 0xff) + 2; /* 2d commands */
+ case 0x3:
+ if (((cmd >> 24) & 0x1f) <= 0x18)
+ return 1;
+
+ switch ((cmd >> 24) & 0x1f) {
+ case 0x1c:
+ return 1;
+ case 0x1d:
+ switch ((cmd >> 16) & 0xff) {
+ case 0x3:
+ return (cmd & 0x1f) + 2;
+ case 0x4:
+ return (cmd & 0xf) + 2;
+ default:
+ return (cmd & 0xffff) + 2;
+ }
+ case 0x1e:
+ if (cmd & (1 << 23))
+ return (cmd & 0xffff) + 1;
+ else
+ return 1;
+ case 0x1f:
+ if ((cmd & (1 << 23)) == 0) /* inline vertices */
+ return (cmd & 0x1ffff) + 2;
+ else if (cmd & (1 << 17)) /* indirect random */
+ if ((cmd & 0xffff) == 0)
+ return 0; /* unknown length, too hard */
+ else
+ return (((cmd & 0xffff) + 1) / 2) + 1;
+ else
+ return 2; /* indirect sequential */
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+
+#ifndef __SUNPRO_C
+ return 0;
+#endif
+}
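+
+/*
+ * Decode example (illustrative): a 2D command (bits 31:29 == 0x2) with low
+ * byte 0x04 makes do_validate_cmd() return (cmd & 0xff) + 2 = 6, so
+ * validation resumes 6 dwords later; a return of 0 marks the instruction
+ * illegal and aborts the rest of the buffer.
+ */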
+
+static int validate_cmd(int cmd)
+{
+ int ret = do_validate_cmd(cmd);
+
+/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
+
+ return ret;
+}
+
+static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+ RING_LOCALS;
+
+ if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) {
+ DRM_ERROR(" emit cmds invalid arg");
+ return (EINVAL);
+ }
+ BEGIN_LP_RING((dwords+1)&~1);
+
+ for (i = 0; i < dwords;) {
+ int cmd, sz;
+
+ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) {
+ DRM_ERROR("emit cmds failed to get cmd from user");
+ return (EINVAL);
+ }
+
+ if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) {
+ DRM_ERROR("emit cmds invalid");
+ return (EINVAL);
+ }
+ OUT_RING(cmd);
+
+ while (++i, --sz) {
+ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
+ sizeof(cmd))) {
+ DRM_ERROR("emit cmds failed get cmds");
+ return (EINVAL);
+ }
+ OUT_RING(cmd);
+ }
+ }
+
+ if (dwords & 1)
+ OUT_RING(0);
+
+ ADVANCE_LP_RING();
+
+ return 0;
+}
+
+int i915_emit_box(drm_device_t * dev,
+ drm_clip_rect_t __user * boxes,
+ int i, int DR1, int DR4)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_clip_rect_t box;
+ RING_LOCALS;
+
+ if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
+ DRM_ERROR("emit box failed to copy from user");
+ return (EFAULT);
+ }
+
+ if (box.y2 <= box.y1 || box.x2 <= box.x1) {
+ DRM_ERROR("Bad box %d,%d..%d,%d\n",
+ box.x1, box.y1, box.x2, box.y2);
+ return (EINVAL);
+ }
+
+ if (IS_I965G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+ OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING(DR4);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(6);
+ OUT_RING(GFX_OP_DRAWRECT_INFO);
+ OUT_RING(DR1);
+ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+ OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING(DR4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
+
+ return 0;
+}
+
+/* XXX: Emitting the counter should really be moved to part of the IRQ
+ * emit. For now, do it in both places:
+ */
+
+void i915_emit_breadcrumb(drm_device_t *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 0;
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+}
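+
+/*
+ * The MI_STORE_DWORD_INDEX sequence above asks the GPU to write
+ * dev_priv->counter into the hardware status page at dword
+ * I915_BREADCRUMB_INDEX; READ_BREADCRUMB() later reads that slot back, so
+ * software can tell how far the ring has actually executed (see the
+ * last_dispatch updates in the batchbuffer/cmdbuffer ioctls).
+ */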
+
+static int i915_dispatch_cmdbuffer(drm_device_t * dev,
+ drm_i915_cmdbuffer_t * cmd)
+{
+ int nbox = cmd->num_cliprects;
+ int i = 0, count, ret;
+
+ if (cmd->sz & 0x3) {
+ DRM_ERROR("alignment");
+ return (EINVAL);
+ }
+
+ i915_kernel_lost_context(dev);
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ ret = i915_emit_box(dev, cmd->cliprects, i,
+ cmd->DR1, cmd->DR4);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_emit_cmds(dev, (int __user *)(void *)cmd->buf, cmd->sz / 4);
+ if (ret)
+ return ret;
+ }
+
+ i915_emit_breadcrumb( dev );
+ return 0;
+}
+
+static int i915_dispatch_batchbuffer(drm_device_t * dev,
+ drm_i915_batchbuffer_t * batch)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_clip_rect_t __user *boxes = batch->cliprects;
+ int nbox = batch->num_cliprects;
+ int i = 0, count;
+ RING_LOCALS;
+
+ if ((batch->start | batch->used) & 0x7) {
+ DRM_ERROR("alignment");
+ return (EINVAL);
+ }
+
+ i915_kernel_lost_context(dev);
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, boxes, i,
+ batch->DR1, batch->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ OUT_RING(batch->start + batch->used - 4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(2);
+ if (IS_I965G(dev)) {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ OUT_RING(batch->start);
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ }
+ ADVANCE_LP_RING();
+ }
+ }
+
+ i915_emit_breadcrumb( dev );
+
+ return 0;
+}
+
+static int i915_dispatch_flip(struct drm_device * dev, int planes)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ if (!dev_priv->sarea_priv)
+ return (EINVAL);
+
+ DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
+ planes, dev_priv->sarea_priv->pf_current_page);
+
+ i915_kernel_lost_context(dev);
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_FLUSH | MI_READ_FLUSH);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+
+ BEGIN_LP_RING(6);
+ OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
+ OUT_RING(0);
+ if (dev_priv->current_page == 0) {
+ OUT_RING(dev_priv->back_offset);
+ dev_priv->current_page = 1;
+ } else {
+ OUT_RING(dev_priv->front_offset);
+ dev_priv->current_page = 0;
+ }
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+
+ dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ return 0;
+}
+
+static int i915_quiescent(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ i915_kernel_lost_context(dev);
+ ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+
+ if (ret) {
+ i915_kernel_lost_context(dev);
+ DRM_ERROR("not quiescent head %08x tail %08x space %08x\n",
+ dev_priv->ring.head,
+ dev_priv->ring.tail,
+ dev_priv->ring.space);
+ }
+ return ret;
+}
+
+/*ARGSUSED*/
+static int i915_flush_ioctl(DRM_IOCTL_ARGS)
+{
+ int ret;
+ DRM_DEVICE;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ spin_lock(&dev->struct_mutex);
+ ret = i915_quiescent(dev);
+ spin_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/*ARGSUSED*/
+static int i915_batchbuffer(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+ dev_priv->sarea_priv;
+ drm_i915_batchbuffer_t batch;
+ int ret;
+
+ if (!dev_priv->allow_batchbuffer) {
+ DRM_ERROR("Batchbuffer ioctl disabled\n");
+ return (EINVAL);
+ }
+
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_i915_batchbuffer32_t batchbuffer32_t;
+
+ DRM_COPYFROM_WITH_RETURN(&batchbuffer32_t,
+ (void *) data, sizeof (batchbuffer32_t));
+
+ batch.start = batchbuffer32_t.start;
+ batch.used = batchbuffer32_t.used;
+ batch.DR1 = batchbuffer32_t.DR1;
+ batch.DR4 = batchbuffer32_t.DR4;
+ batch.num_cliprects = batchbuffer32_t.num_cliprects;
+ batch.cliprects = (drm_clip_rect_t __user *)
+ (uintptr_t)batchbuffer32_t.cliprects;
+ } else
+ DRM_COPYFROM_WITH_RETURN(&batch, (void *) data,
+ sizeof(batch));
+
+ DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d, counter %d\n",
+ batch.start, batch.used, batch.num_cliprects, dev_priv->counter);
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+/*
+ if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
+ batch.num_cliprects *
+ sizeof(drm_clip_rect_t)))
+ return (EFAULT);
+
+*/
+
+ spin_lock(&dev->struct_mutex);
+ ret = i915_dispatch_batchbuffer(dev, &batch);
+ spin_unlock(&dev->struct_mutex);
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+ return ret;
+}
+
+/*ARGSUSED*/
+static int i915_cmdbuffer(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+ dev_priv->sarea_priv;
+ drm_i915_cmdbuffer_t cmdbuf;
+ int ret;
+
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_i915_cmdbuffer32_t cmdbuffer32_t;
+
+ DRM_COPYFROM_WITH_RETURN(&cmdbuffer32_t,
+ (drm_i915_cmdbuffer32_t __user *) data,
+ sizeof (drm_i915_cmdbuffer32_t));
+
+ cmdbuf.buf = (char __user *)(uintptr_t)cmdbuffer32_t.buf;
+ cmdbuf.sz = cmdbuffer32_t.sz;
+ cmdbuf.DR1 = cmdbuffer32_t.DR1;
+ cmdbuf.DR4 = cmdbuffer32_t.DR4;
+ cmdbuf.num_cliprects = cmdbuffer32_t.num_cliprects;
+ cmdbuf.cliprects = (drm_clip_rect_t __user *)
+ (uintptr_t)cmdbuffer32_t.cliprects;
+ } else
+ DRM_COPYFROM_WITH_RETURN(&cmdbuf, (void *) data,
+ sizeof(cmdbuf));
+
+ DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+ cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+/*
+ if (cmdbuf.num_cliprects &&
+ DRM_VERIFYAREA_READ(cmdbuf.cliprects,
+ cmdbuf.num_cliprects *
+ sizeof(drm_clip_rect_t))) {
+ DRM_ERROR("Fault accessing cliprects\n");
+ return (EFAULT);
+ }
+*/
+
+ spin_lock(&dev->struct_mutex);
+ ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
+ spin_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+ return ret;
+ }
+
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+}
+
+/*ARGSUSED*/
+static int i915_flip_bufs(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_flip_t param;
+ int ret;
+ DRM_COPYFROM_WITH_RETURN(&param, (drm_i915_flip_t *) data,
+ sizeof(param));
+
+ DRM_DEBUG("i915_flip_bufs\n");
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ spin_lock(&dev->struct_mutex);
+ ret = i915_dispatch_flip(dev, param.pipes);
+ spin_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/*ARGSUSED*/
+static int i915_getparam(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_getparam_t param;
+ int value;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_i915_getparam32_t getparam32_t;
+
+ DRM_COPYFROM_WITH_RETURN(&getparam32_t,
+ (drm_i915_getparam32_t __user *) data,
+ sizeof (drm_i915_getparam32_t));
+
+ param.param = getparam32_t.param;
+ param.value = (int __user *)(uintptr_t)getparam32_t.value;
+ } else
+ DRM_COPYFROM_WITH_RETURN(&param,
+ (drm_i915_getparam_t *) data, sizeof(param));
+
+ switch (param.param) {
+ case I915_PARAM_IRQ_ACTIVE:
+ value = dev->irq_enabled ? 1 : 0;
+ break;
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ value = dev_priv->allow_batchbuffer ? 1 : 0;
+ break;
+ case I915_PARAM_LAST_DISPATCH:
+ value = READ_BREADCRUMB(dev_priv);
+ break;
+ case I915_PARAM_CHIPSET_ID:
+ value = dev->pci_device;
+ break;
+ case I915_PARAM_HAS_GEM:
+ value = dev->driver->use_gem;
+ break;
+ default:
+ DRM_ERROR("Unknown get parameter %d\n", param.param);
+ return (EINVAL);
+ }
+
+ if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
+ DRM_ERROR("i915_getparam failed\n");
+ return (EFAULT);
+ }
+ return 0;
+}
+
+/*ARGSUSED*/
+static int i915_setparam(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_setparam_t param;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&param, (drm_i915_setparam_t *) data,
+ sizeof(param));
+
+ switch (param.param) {
+ case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+ break;
+ case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+ dev_priv->tex_lru_log_granularity = param.value;
+ break;
+ case I915_SETPARAM_ALLOW_BATCHBUFFER:
+ dev_priv->allow_batchbuffer = param.value;
+ break;
+ default:
+ DRM_ERROR("unknown set parameter %d\n", param.param);
+ return (EINVAL);
+ }
+
+ return 0;
+}
+
+/*ARGSUSED*/
+static int i915_set_status_page(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_hws_addr_t hws;
+
+ if (!I915_NEED_GFX_HWS(dev))
+ return (EINVAL);
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+ DRM_COPYFROM_WITH_RETURN(&hws, (drm_i915_hws_addr_t __user *) data,
+ sizeof(hws));
+DRM_ERROR("i915_set_status_page set status page addr 0x%08x\n", (u32)hws.addr);
+
+ dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);
+ DRM_DEBUG("set gfx_addr 0x%08x\n", dev_priv->status_gfx_addr);
+
+ dev_priv->hws_map.offset =
+ (u_offset_t)dev->agp->agp_info.agpi_aperbase + hws.addr;
+ dev_priv->hws_map.size = 4 * 1024; /* 4K pages */
+ dev_priv->hws_map.type = 0;
+ dev_priv->hws_map.flags = 0;
+ dev_priv->hws_map.mtrr = 0;
+
+ DRM_DEBUG("set status page: i915_set_status_page: mapoffset 0x%llx\n",
+ dev_priv->hws_map.offset);
+ drm_core_ioremap(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.handle == NULL) {
+ dev->dev_private = (void *)dev_priv;
+ (void) i915_dma_cleanup(dev);
+ dev_priv->status_gfx_addr = 0;
+ DRM_ERROR("can not ioremap virtual address for"
+ " G33 hw status page\n");
+ return (ENOMEM);
+ }
+ dev_priv->hw_status_page = dev_priv->hws_map.dev_addr;
+
+ (void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
+ dev_priv->status_gfx_addr);
+ DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+ return 0;
+}
+
+/*ARGSUSED*/
+int i915_driver_load(drm_device_t *dev, unsigned long flags)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long base, size;
+ int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+
+ /* i915 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return (ENOMEM);
+
+ (void) memset(dev_priv, 0, sizeof(drm_i915_private_t));
+ dev->dev_private = (void *)dev_priv;
+ dev_priv->dev = dev;
+
+ /* Add register map (needed for suspend/resume) */
+
+ base = drm_get_resource_start(dev, mmio_bar);
+ size = drm_get_resource_len(dev, mmio_bar);
+ dev_priv->mmio_map = drm_alloc(sizeof (drm_local_map_t), DRM_MEM_MAPS);
+ dev_priv->mmio_map->offset = base;
+ dev_priv->mmio_map->size = size;
+ dev_priv->mmio_map->type = _DRM_REGISTERS;
+ dev_priv->mmio_map->flags = _DRM_REMOVABLE;
+ (void) drm_ioremap(dev, dev_priv->mmio_map);
+
+ DRM_DEBUG("i915_driverload mmio %p mmio_map->dev_addr %x", dev_priv->mmio_map, dev_priv->mmio_map->dev_addr);
+
+#if defined(__i386)
+ dev->driver->use_gem = 0;
+#else
+ if (IS_I965G(dev)) {
+ dev->driver->use_gem = 1;
+ } else {
+ dev->driver->use_gem = 0;
+ }
+#endif /* __i386 */
+
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+#if defined(__i386)
+ if (IS_G4X(dev) || IS_IGDNG(dev) || IS_GM45(dev))
+#else
+ if (IS_G4X(dev) || IS_IGDNG(dev))
+#endif
+ {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
+
+#ifdef I915_HAVE_GEM
+ i915_gem_load(dev);
+#endif
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = i915_init_hardware_status(dev);
+ if (ret)
+ return ret;
+ }
+
+ mutex_init(&dev_priv->user_irq_lock, "userirq", MUTEX_DRIVER, NULL);
+ mutex_init(&dev_priv->error_lock, "error_lock", MUTEX_DRIVER, NULL);
+
+ ret = drm_vblank_init(dev, I915_NUM_PIPE);
+ if (ret) {
+ (void) i915_driver_unload(dev);
+ return ret;
+ }
+
+ return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ i915_free_hardware_status(dev);
+
+ drm_rmmap(dev, dev_priv->mmio_map);
+
+ mutex_destroy(&dev_priv->user_irq_lock);
+
+ drm_free(dev->dev_private, sizeof(drm_i915_private_t),
+ DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+/*ARGSUSED*/
+int i915_driver_open(drm_device_t * dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv;
+
+ DRM_DEBUG("\n");
+ i915_file_priv = (struct drm_i915_file_private *)
+ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+
+ if (!i915_file_priv)
+ return (ENOMEM);
+
+ file_priv->driver_priv = i915_file_priv;
+
+ i915_file_priv->mm.last_gem_seqno = 0;
+ i915_file_priv->mm.last_gem_throttle_seqno = 0;
+
+ return 0;
+}
+
+void i915_driver_lastclose(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* this can be called (e.g. on agp off) before dev_priv is set up */
+ if (!dev_priv)
+ return;
+
+#ifdef I915_HAVE_GEM
+ i915_gem_lastclose(dev);
+#endif
+
+ DRM_GETSAREA();
+ if (dev_priv->agp_heap)
+ i915_mem_takedown(&(dev_priv->agp_heap));
+ (void) i915_dma_cleanup(dev);
+}
+
+void i915_driver_preclose(drm_device_t * dev, drm_file_t *fpriv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ i915_mem_release(dev, fpriv, dev_priv->agp_heap);
+}
+
+/*ARGSUSED*/
+void i915_driver_postclose(drm_device_t * dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+}
+
+drm_ioctl_desc_t i915_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_I915_INIT)] =
+ {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_FLUSH)] =
+ {i915_flush_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_FLIP)] =
+ {i915_flip_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] =
+ {i915_batchbuffer, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] =
+ {i915_irq_emit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] =
+ {i915_irq_wait, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_GETPARAM)] =
+ {i915_getparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_SETPARAM)] =
+ {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_ALLOC)] =
+ {i915_mem_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_FREE)] =
+ {i915_mem_free, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] =
+ {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] =
+ {i915_cmdbuffer, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] =
+ {i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] =
+ {i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] =
+ {i915_vblank_pipe_get, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] =
+ {i915_vblank_swap, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] =
+ {i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+#ifdef I915_HAVE_GEM
+ [DRM_IOCTL_NR(DRM_I915_GEM_INIT)] =
+ {i915_gem_init_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_GEM_EXECBUFFER)] =
+ {i915_gem_execbuffer, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_GEM_PIN)] =
+ {i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_GEM_UNPIN)] =
+ {i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_GEM_BUSY)] =
+ {i915_gem_busy_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_GEM_THROTTLE)] =
+ {i915_gem_throttle_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_GEM_ENTERVT)] =
+ {i915_gem_entervt_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_GEM_LEAVEVT)] =
+ {i915_gem_leavevt_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_GEM_CREATE)] =
+ {i915_gem_create_ioctl, 0},
+ [DRM_IOCTL_NR(DRM_I915_GEM_PREAD)] =
+ {i915_gem_pread_ioctl, 0},
+ [DRM_IOCTL_NR(DRM_I915_GEM_PWRITE)] =
+ {i915_gem_pwrite_ioctl, 0},
+ [DRM_IOCTL_NR(DRM_I915_GEM_MMAP)] =
+ {i915_gem_mmap_ioctl, 0},
+ [DRM_IOCTL_NR(DRM_I915_GEM_SET_DOMAIN)] =
+ {i915_gem_set_domain_ioctl, 0},
+ [DRM_IOCTL_NR(DRM_I915_GEM_SW_FINISH)] =
+ {i915_gem_sw_finish_ioctl, 0},
+ [DRM_IOCTL_NR(DRM_I915_GEM_SET_TILING)] =
+ {i915_gem_set_tiling, 0},
+ [DRM_IOCTL_NR(DRM_I915_GEM_GET_TILING)] =
+ {i915_gem_get_tiling, 0},
+ [DRM_IOCTL_NR(DRM_I915_GEM_GET_APERTURE)] =
+ {i915_gem_get_aperture_ioctl, 0},
+#endif
+};
+
+int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate that every i9x5 is AGP.
+ */
+/*ARGSUSED*/
+int i915_driver_device_is_agp(drm_device_t * dev)
+{
+ return 1;
+}
+
diff --git a/usr/src/uts/intel/io/drm/i915_drm.h b/usr/src/uts/intel/io/drm/i915_drm.h
new file mode 100644
index 0000000..e6b967a
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_drm.h
@@ -0,0 +1,742 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _I915_DRM_H
+#define _I915_DRM_H
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#include "drm.h"
+
+/* Each region is a minimum of 16k, and there are at most 255 of them.
+ */
+#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
+ * of chars for next/prev indices */
+#define I915_LOG_MIN_TEX_REGION_SIZE 14
+
+typedef struct _drm_i915_init {
+ enum {
+ I915_INIT_DMA = 0x01,
+ I915_CLEANUP_DMA = 0x02,
+ I915_RESUME_DMA = 0x03
+ } func;
+ unsigned int mmio_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+ unsigned int chipset;
+} drm_i915_init_t;
+
+typedef struct _drm_i915_sarea {
+ drm_tex_region_t texList[I915_NR_TEX_REGIONS + 1];
+ int last_upload; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int ctxOwner; /* last context to upload state */
+ int texAge;
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+ int perf_boxes; /* performance boxes to be displayed */
+ int width, height; /* screen size in pixels */
+ int pad0;
+
+ drm_handle_t front_handle;
+ int front_offset;
+ int front_size;
+
+ drm_handle_t back_handle;
+ int back_offset;
+ int back_size;
+
+ drm_handle_t depth_handle;
+ int depth_offset;
+ int depth_size;
+
+ drm_handle_t tex_handle;
+ int tex_offset;
+ int tex_size;
+ int log_tex_granularity;
+ int pitch;
+ int rotation; /* 0, 90, 180 or 270 */
+ int rotated_offset;
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
+
+ unsigned int front_tiled;
+ unsigned int back_tiled;
+ unsigned int depth_tiled;
+ unsigned int rotated_tiled;
+ unsigned int rotated2_tiled;
+
+ int pipeA_x;
+ int pipeA_y;
+ int pipeA_w;
+ int pipeA_h;
+ int pipeB_x;
+ int pipeB_y;
+ int pipeB_w;
+ int pipeB_h;
+
+ int pad1;
+ /* Triple buffering */
+ drm_handle_t third_handle;
+ int third_offset;
+ int third_size;
+ unsigned int third_tiled;
+
+ unsigned int front_bo_handle;
+ unsigned int back_bo_handle;
+ unsigned int third_bo_handle;
+ unsigned int depth_bo_handle;
+} drm_i915_sarea_t;
+
+/* Driver specific fence types and classes.
+ */
+
+/* The only fence class we support */
+#define DRM_I915_FENCE_CLASS_ACCEL 0
+/* Fence type that guarantees read-write flush */
+#define DRM_I915_FENCE_TYPE_RW 2
+/* MI_FLUSH programmed just before the fence */
+#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
+
+/* Flags for perf_boxes
+ */
+#define I915_BOX_RING_EMPTY 0x1
+#define I915_BOX_FLIP 0x2
+#define I915_BOX_WAIT 0x4
+#define I915_BOX_TEXTURE_LOAD 0x8
+#define I915_BOX_LOST_CONTEXT 0x10
+
+/* I915 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
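+/*
+ * Illustrative mapping (assuming DRM_COMMAND_BASE is 0x40, per the range
+ * noted above): DRM_I915_GETPARAM (0x06) is dispatched as ioctl number
+ * 0x40 + 0x06 = 0x46 through the DRM_IOCTL_I915_* macros below.
+ */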
+#define DRM_I915_INIT 0x00
+#define DRM_I915_FLUSH 0x01
+#define DRM_I915_FLIP 0x02
+#define DRM_I915_BATCHBUFFER 0x03
+#define DRM_I915_IRQ_EMIT 0x04
+#define DRM_I915_IRQ_WAIT 0x05
+#define DRM_I915_GETPARAM 0x06
+#define DRM_I915_SETPARAM 0x07
+#define DRM_I915_ALLOC 0x08
+#define DRM_I915_FREE 0x09
+#define DRM_I915_INIT_HEAP 0x0a
+#define DRM_I915_CMDBUFFER 0x0b
+#define DRM_I915_DESTROY_HEAP 0x0c
+#define DRM_I915_SET_VBLANK_PIPE 0x0d
+#define DRM_I915_GET_VBLANK_PIPE 0x0e
+#define DRM_I915_VBLANK_SWAP 0x0f
+#define DRM_I915_HWS_ADDR 0x11
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT 0x24
+
+#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
+#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
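+
+/*
+ * Illustrative sketch (not part of the original import): userspace
+ * reaches these entry points with ioctl(2) on an open DRM device node
+ * (the path is platform-specific).  For example, flushing the ring:
+ *
+ *     int fd = open("/dev/dri/card0", O_RDWR);   // hypothetical path
+ *     if (fd >= 0)
+ *             (void) ioctl(fd, DRM_IOCTL_I915_FLUSH);
+ *
+ * DRM_IOCTL_I915_FLUSH expands to DRM_IO(DRM_COMMAND_BASE + 0x01);
+ * DRM_COMMAND_BASE (0x40) is where the driver-private range begins.
+ */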
+
+/* Asynchronous page flipping:
+ */
+typedef struct drm_i915_flip {
+ /*
+ * This is really talking about planes, and we could rename it
+ * except for the fact that some of the duplicated i915_drm.h files
+ * out there check for HAVE_I915_FLIP and so might pick up this
+ * version.
+ */
+ int pipes;
+} drm_i915_flip_t;
+
+/* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+ */
+typedef struct _drm_i915_batchbuffer {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer_t;
+
+typedef struct _drm_i915_batchbuffer32 {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ caddr32_t cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer32_t;
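+
+/*
+ * Illustrative sketch (not part of the original import): a client that
+ * already holds a command batch in AGP space might submit it as follows
+ * ("fd", "agp_off" and "nbytes" are hypothetical; error handling omitted):
+ *
+ *     drm_i915_batchbuffer_t bb;
+ *
+ *     (void) memset(&bb, 0, sizeof (bb));
+ *     bb.start = agp_off;        // agp offset of the batch
+ *     bb.used = nbytes;          // bytes of commands in the batch
+ *     bb.num_cliprects = 0;      // single pass, no cliprects
+ *     (void) ioctl(fd, DRM_IOCTL_I915_BATCHBUFFER, &bb);
+ */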
+
+/* As above, but pass a pointer to a userspace buffer which can be
+ * validated by the kernel prior to sending to hardware.
+ */
+typedef struct _drm_i915_cmdbuffer {
+ char __user *buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer_t;
+
+typedef struct _drm_i915_cmdbuffer32 {
+ caddr32_t buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ caddr32_t cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer32_t;
+
+/* Userspace can request & wait on irqs:
+ */
+typedef struct drm_i915_irq_emit {
+ int __user *irq_seq;
+} drm_i915_irq_emit_t;
+
+typedef struct drm_i915_irq_emit32 {
+ caddr32_t irq_seq;
+} drm_i915_irq_emit32_t;
+
+typedef struct drm_i915_irq_wait {
+ int irq_seq;
+} drm_i915_irq_wait_t;
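+
+/*
+ * Illustrative sketch (not part of the original import): the usual
+ * pattern is to emit an irq to obtain a sequence number and then wait
+ * on it ("fd" is a hypothetical open DRM descriptor):
+ *
+ *     int seq = 0;
+ *     drm_i915_irq_emit_t emit = { &seq };
+ *     drm_i915_irq_wait_t wait;
+ *
+ *     (void) ioctl(fd, DRM_IOCTL_I915_IRQ_EMIT, &emit);
+ *     wait.irq_seq = seq;
+ *     (void) ioctl(fd, DRM_IOCTL_I915_IRQ_WAIT, &wait);
+ */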
+
+/* Ioctl to query kernel params:
+ */
+#define I915_PARAM_IRQ_ACTIVE 1
+#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
+#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
+
+typedef struct drm_i915_getparam {
+ int param;
+ int __user *value;
+} drm_i915_getparam_t;
+
+typedef struct drm_i915_getparam32 {
+ int param;
+ caddr32_t value;
+} drm_i915_getparam32_t;
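+
+/*
+ * Illustrative sketch (not part of the original import): querying
+ * whether the kernel side supports GEM ("fd" and "use_gem_paths" are
+ * hypothetical):
+ *
+ *     int has_gem = 0;
+ *     drm_i915_getparam_t gp;
+ *
+ *     gp.param = I915_PARAM_HAS_GEM;
+ *     gp.value = &has_gem;
+ *     if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_gem != 0)
+ *             use_gem_paths();
+ */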
+
+/* Ioctl to set kernel params:
+ */
+#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
+#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
+#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+
+typedef struct drm_i915_setparam {
+ int param;
+ int value;
+} drm_i915_setparam_t;
+
+/* A memory manager for regions of shared memory:
+ */
+#define I915_MEM_REGION_AGP 1
+
+typedef struct drm_i915_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int __user *region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc_t;
+
+typedef struct drm_i915_mem_alloc32 {
+ int region;
+ int alignment;
+ int size;
+ caddr32_t region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc32_t;
+
+typedef struct drm_i915_mem_free {
+ int region;
+ int region_offset;
+} drm_i915_mem_free_t;
+
+typedef struct drm_i915_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_i915_mem_init_heap_t;
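+
+/*
+ * Illustrative sketch (not part of the original import): the X server
+ * would typically initialize the heap once, after which clients can
+ * allocate from it ("fd", "heap_sz" and "obj_sz" are hypothetical):
+ *
+ *     drm_i915_mem_init_heap_t ih = { I915_MEM_REGION_AGP, heap_sz, 0 };
+ *     int off;
+ *     drm_i915_mem_alloc_t ma = { I915_MEM_REGION_AGP, 4096, obj_sz, &off };
+ *
+ *     (void) ioctl(fd, DRM_IOCTL_I915_INIT_HEAP, &ih);
+ *     (void) ioctl(fd, DRM_IOCTL_I915_ALLOC, &ma);
+ *     // on success, off holds the allocated region offset
+ */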
+
+/* Allow the memory manager to be torn down and re-initialized (e.g. on
+ * rotate):
+ */
+typedef struct drm_i915_mem_destroy_heap {
+ int region;
+} drm_i915_mem_destroy_heap_t;
+
+/* Allow X server to configure which pipes to monitor for vblank signals
+ */
+#define DRM_I915_VBLANK_PIPE_A 1
+#define DRM_I915_VBLANK_PIPE_B 2
+
+typedef struct drm_i915_vblank_pipe {
+ int pipe;
+} drm_i915_vblank_pipe_t;
+
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+ drm_drawable_t drawable;
+ drm_vblank_seq_type_t seqtype;
+ unsigned int sequence;
+} drm_i915_vblank_swap_t;
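+
+/*
+ * Illustrative sketch (not part of the original import): scheduling a
+ * swap of a drawable at an absolute vblank count ("fd", "draw_id" and
+ * "target" are hypothetical; _DRM_VBLANK_ABSOLUTE comes from drm.h):
+ *
+ *     drm_i915_vblank_swap_t vs;
+ *
+ *     vs.drawable = draw_id;
+ *     vs.seqtype = _DRM_VBLANK_ABSOLUTE;
+ *     vs.sequence = target;
+ *     (void) ioctl(fd, DRM_IOCTL_I915_VBLANK_SWAP, &vs);
+ *     // on return, vs.sequence holds the vblank the swap was queued for
+ */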
+
+#define I915_MMIO_READ 0
+#define I915_MMIO_WRITE 1
+
+#define I915_MMIO_MAY_READ 0x1
+#define I915_MMIO_MAY_WRITE 0x2
+
+#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
+#define MMIO_REGS_IA_VERTICES_COUNT 1
+#define MMIO_REGS_VS_INVOCATION_COUNT 2
+#define MMIO_REGS_GS_PRIMITIVES_COUNT 3
+#define MMIO_REGS_GS_INVOCATION_COUNT 4
+#define MMIO_REGS_CL_PRIMITIVES_COUNT 5
+#define MMIO_REGS_CL_INVOCATION_COUNT 6
+#define MMIO_REGS_PS_INVOCATION_COUNT 7
+#define MMIO_REGS_PS_DEPTH_COUNT 8
+
+typedef struct drm_i915_mmio_entry {
+ unsigned int flag;
+ unsigned int offset;
+ unsigned int size;
+} drm_i915_mmio_entry_t;
+
+typedef struct drm_i915_mmio {
+ unsigned int read_write:1;
+ unsigned int reg:31;
+ void __user *data;
+} drm_i915_mmio_t;
+
+typedef struct drm_i915_hws_addr {
+ uint64_t addr;
+} drm_i915_hws_addr_t;
+
+
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_end;
+
+};
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /**
+ * Pointer to write the data into.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /**
+ * Pointer to read the data from.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t data_ptr;
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /**
+ * Returned pointer the data was mapped at.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t addr_ptr;
+};
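+
+/*
+ * Illustrative sketch (not part of the original import): creating a GEM
+ * object and mapping it for CPU access ("fd" is hypothetical):
+ *
+ *     struct drm_i915_gem_create cr = { 0 };
+ *     struct drm_i915_gem_mmap mm = { 0 };
+ *
+ *     cr.size = 4096;
+ *     (void) ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &cr);
+ *     mm.handle = cr.handle;
+ *     mm.size = cr.size;
+ *     (void) ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mm);
+ *     // mm.addr_ptr now holds the CPU address, widened to 64 bits
+ */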
+
+struct drm_i915_gem_mmap_gtt {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+ /** Handle for the object */
+ uint32_t handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this be an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ uint32_t delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+};
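+
+/*
+ * Illustrative note (not part of the original import): conceptually,
+ * for each entry the kernel computes
+ *
+ *     value = target_object_gtt_offset + reloc.delta;
+ *
+ * and writes it into the submitting buffer at reloc.offset.  When
+ * reloc.presumed_offset already equals the target's real GTT offset,
+ * the write can be skipped, since the batch already contains the
+ * correct value.
+ */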
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** @} */
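+
+/*
+ * Illustrative sketch (not part of the original import): before reading
+ * an object through its CPU mapping, move it to the CPU read domain so
+ * any GPU caches are flushed first ("fd" and "bo" are hypothetical):
+ *
+ *     struct drm_i915_gem_set_domain sd;
+ *
+ *     sd.handle = bo;
+ *     sd.read_domains = I915_GEM_DOMAIN_CPU;
+ *     sd.write_domain = 0;
+ *     (void) ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
+ */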
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ uint32_t handle;
+
+ /** Number of relocations to be performed on this buffer */
+ uint32_t relocation_count;
+
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ uint64_t relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ uint64_t offset;
+
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+ * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_exec_object.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ uint32_t batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ uint64_t cliprects_ptr;
+};
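+
+/*
+ * Illustrative sketch (not part of the original import): a two-object
+ * submission where the batch (listed last) relocates against one target
+ * buffer ("fd", the handles, "relocs", "nrelocs" and "used" are
+ * hypothetical):
+ *
+ *     struct drm_i915_gem_exec_object objs[2];
+ *     struct drm_i915_gem_execbuffer eb;
+ *
+ *     (void) memset(objs, 0, sizeof (objs));
+ *     (void) memset(&eb, 0, sizeof (eb));
+ *     objs[0].handle = target_handle;   // referenced buffer first
+ *     objs[1].handle = batch_handle;    // batch buffer last
+ *     objs[1].relocation_count = nrelocs;
+ *     objs[1].relocs_ptr = (uintptr_t)relocs;
+ *     eb.buffers_ptr = (uintptr_t)objs;
+ *     eb.buffer_count = 2;
+ *     eb.batch_len = used;
+ *     (void) ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &eb);
+ */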
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+};
+
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ uint32_t handle;
+
+ /** Return busy status (1 if busy, 0 if idle) */
+ uint32_t busy;
+};
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ uint32_t handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ uint32_t stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
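+
+/*
+ * Illustrative sketch (not part of the original import): requesting X
+ * tiling for an object and reading back what the kernel chose ("fd",
+ * "bo" and "pitch" are hypothetical):
+ *
+ *     struct drm_i915_gem_set_tiling st = { 0 };
+ *
+ *     st.handle = bo;
+ *     st.tiling_mode = I915_TILING_X;
+ *     st.stride = pitch;
+ *     (void) ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
+ *     // st.tiling_mode and st.swizzle_mode now hold the actual result
+ */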
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ uint32_t handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_aperture {
+ /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+ uint64_t aper_size;
+
+ /**
+ * Available space in the aperture used by i915_gem_execbuffer, in
+ * bytes
+ */
+ uint64_t aper_available_size;
+};
+
+#endif /* _I915_DRM_H */
diff --git a/usr/src/uts/intel/io/drm/i915_drv.c b/usr/src/uts/intel/io/drm/i915_drv.c
new file mode 100644
index 0000000..8989e1a
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_drv.c
@@ -0,0 +1,1047 @@
+/* BEGIN CSTYLED */
+
+/*
+ * i915_drv.c -- Intel i915 driver -*- linux-c -*-
+ * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
+ */
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Copyright 2014 RackTop Systems.
+ */
+
+/*
+ * I915 DRM Driver for Solaris
+ *
+ * This driver provides the hardware 3D acceleration support for Intel
+ * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
+ * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
+ * means the kernel device driver in DRI.
+ *
+ * The i915 driver is a device-dependent driver only; it depends on a misc
+ * module named drm for generic DRM operations.
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "drm_pciids.h"
+
+/*
+ * copied from vgasubr.h
+ */
+
+struct vgaregmap {
+ uint8_t *addr;
+ ddi_acc_handle_t handle;
+ boolean_t mapped;
+};
+
+enum pipe {
+ PIPE_A = 0,
+ PIPE_B,
+};
+
+
+/*
+ * cb_ops entrypoint
+ */
+extern struct cb_ops drm_cb_ops;
+
+/*
+ * module entrypoint
+ */
+static int i915_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
+static int i915_attach(dev_info_t *, ddi_attach_cmd_t);
+static int i915_detach(dev_info_t *, ddi_detach_cmd_t);
+
+
+/* drv_PCI_IDs comes from drm_pciids.h */
+static drm_pci_id_list_t i915_pciidlist[] = {
+ i915_PCI_IDS
+};
+
+/*
+ * Local routines
+ */
+static void i915_configure(drm_driver_t *);
+static int i915_quiesce(dev_info_t *dip);
+
+/*
+ * DRM driver
+ */
+static drm_driver_t i915_driver = {0};
+
+
+static struct dev_ops i915_dev_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ i915_info, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ i915_attach, /* devo_attach */
+ i915_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ &drm_cb_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ NULL, /* power */
+ i915_quiesce, /* devo_quiesce */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* drv_modops */
+ "I915 DRM driver", /* drv_linkinfo */
+ &i915_dev_ops, /* drv_dev_ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *) &modldrv, NULL
+};
+
+static ddi_device_acc_attr_t s3_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC /* must be DDI_STRICTORDER_ACC */
+};
+
+/*
+ * softstate head
+ */
+static void *i915_statep;
+
+int
+_init(void)
+{
+ int error;
+
+ i915_configure(&i915_driver);
+
+ if ((error = ddi_soft_state_init(&i915_statep,
+ sizeof (drm_device_t), DRM_MAX_INSTANCES)) != 0)
+ return (error);
+
+ if ((error = mod_install(&modlinkage)) != 0) {
+ ddi_soft_state_fini(&i915_statep);
+ return (error);
+ }
+
+ return (error);
+
+} /* _init() */
+
+int
+_fini(void)
+{
+ int error;
+
+ if ((error = mod_remove(&modlinkage)) != 0)
+ return (error);
+
+ (void) ddi_soft_state_fini(&i915_statep);
+
+ return (0);
+
+} /* _fini() */
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+
+} /* _info() */
+
+/*
+ * off range: 0x3b0 ~ 0x3ff
+ */
+
+static void
+vga_reg_put8(struct vgaregmap *regmap, uint16_t off, uint8_t val)
+{
+ ASSERT((off >= 0x3b0) && (off <= 0x3ff));
+
+ ddi_put8(regmap->handle, regmap->addr + off, val);
+}
+
+/*
+ * off range: 0x3b0 ~ 0x3ff
+ */
+static uint8_t
+vga_reg_get8(struct vgaregmap *regmap, uint16_t off)
+{
+
+ ASSERT((off >= 0x3b0) && (off <= 0x3ff));
+
+ return (ddi_get8(regmap->handle, regmap->addr + off));
+}
+
+static void
+i915_write_indexed(struct vgaregmap *regmap,
+ uint16_t index_port, uint16_t data_port, uint8_t index, uint8_t val)
+{
+ vga_reg_put8(regmap, index_port, index);
+ vga_reg_put8(regmap, data_port, val);
+}
+
+static uint8_t
+i915_read_indexed(struct vgaregmap *regmap,
+ uint16_t index_port, uint16_t data_port, uint8_t index)
+{
+ vga_reg_put8(regmap, index_port, index);
+ return (vga_reg_get8(regmap, data_port));
+}
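+
+/*
+ * Illustrative note (not part of the original import): VGA indexed
+ * registers are reached by writing the register index to the index port
+ * and then transferring the value through the data port.  Reading CRTC
+ * register 0x11 in color (CGA) mode, for example:
+ *
+ *     v = i915_read_indexed(&regmap, VGA_CR_INDEX_CGA,
+ *         VGA_CR_DATA_CGA, 0x11);
+ */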
+
+static void
+i915_write_ar(struct vgaregmap *regmap, uint16_t st01,
+ uint8_t reg, uint8_t val, uint8_t palette_enable)
+{
+ (void) vga_reg_get8(regmap, st01);
+ vga_reg_put8(regmap, VGA_AR_INDEX, palette_enable | reg);
+ vga_reg_put8(regmap, VGA_AR_DATA_WRITE, val);
+}
+
+static uint8_t
+i915_read_ar(struct vgaregmap *regmap, uint16_t st01,
+ uint8_t index, uint8_t palette_enable)
+{
+ (void) vga_reg_get8(regmap, st01);
+ vga_reg_put8(regmap, VGA_AR_INDEX, index | palette_enable);
+ return (vga_reg_get8(regmap, VGA_AR_DATA_READ));
+}
+
+static int
+i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct s3_i915_private *s3_priv = dev->s3_private;
+
+ if (pipe == PIPE_A)
+ return (S3_READ(DPLL_A) & DPLL_VCO_ENABLE);
+ else
+ return (S3_READ(DPLL_B) & DPLL_VCO_ENABLE);
+}
+
+static void
+i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct s3_i915_private *s3_priv = dev->s3_private;
+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ uint32_t *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (pipe == PIPE_A)
+ array = s3_priv->save_palette_a;
+ else
+ array = s3_priv->save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ array[i] = S3_READ(reg + (i << 2));
+
+}
+
+static void
+i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct s3_i915_private *s3_priv = dev->s3_private;
+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ uint32_t *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (pipe == PIPE_A)
+ array = s3_priv->save_palette_a;
+ else
+ array = s3_priv->save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ S3_WRITE(reg + (i << 2), array[i]);
+}
+
+static void
+i915_save_vga(struct drm_device *dev)
+{
+ struct s3_i915_private *s3_priv = dev->s3_private;
+ int i;
+ uint16_t cr_index, cr_data, st01;
+ struct vgaregmap regmap;
+
+ regmap.addr = (uint8_t *)s3_priv->saveAddr;
+ regmap.handle = s3_priv->saveHandle;
+
+ /* VGA color palette registers */
+ s3_priv->saveDACMASK = vga_reg_get8(&regmap, VGA_DACMASK);
+ /* DACCRX automatically increments during read */
+ vga_reg_put8(&regmap, VGA_DACRX, 0);
+ /* Read 3 bytes of color data from each index */
+ for (i = 0; i < 256 * 3; i++)
+ s3_priv->saveDACDATA[i] = vga_reg_get8(&regmap, VGA_DACDATA);
+
+ /* MSR bits */
+ s3_priv->saveMSR = vga_reg_get8(&regmap, VGA_MSR_READ);
+ if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* CRT controller regs */
+ i915_write_indexed(&regmap, cr_index, cr_data, 0x11,
+ i915_read_indexed(&regmap, cr_index, cr_data, 0x11) & (~0x80));
+ for (i = 0; i <= 0x24; i++)
+ s3_priv->saveCR[i] =
+ i915_read_indexed(&regmap, cr_index, cr_data, i);
+ /* Make sure we don't turn off CR group 0 writes */
+ s3_priv->saveCR[0x11] &= ~0x80;
+
+ /* Attribute controller registers */
+ (void) vga_reg_get8(&regmap, st01);
+ s3_priv->saveAR_INDEX = vga_reg_get8(&regmap, VGA_AR_INDEX);
+ for (i = 0; i <= 0x14; i++)
+ s3_priv->saveAR[i] = i915_read_ar(&regmap, st01, i, 0);
+ (void) vga_reg_get8(&regmap, st01);
+ vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX);
+ (void) vga_reg_get8(&regmap, st01);
+
+ /* Graphics controller registers */
+ for (i = 0; i < 9; i++)
+ s3_priv->saveGR[i] =
+ i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i);
+
+ s3_priv->saveGR[0x10] =
+ i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+ s3_priv->saveGR[0x11] =
+ i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+ s3_priv->saveGR[0x18] =
+ i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+ /* Sequencer registers */
+ for (i = 0; i < 8; i++)
+ s3_priv->saveSR[i] =
+ i915_read_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void
+i915_restore_vga(struct drm_device *dev)
+{
+ struct s3_i915_private *s3_priv = dev->s3_private;
+ int i;
+ uint16_t cr_index, cr_data, st01;
+ struct vgaregmap regmap;
+
+ regmap.addr = (uint8_t *)s3_priv->saveAddr;
+ regmap.handle = s3_priv->saveHandle;
+
+ /*
+ * I/O Address Select. This bit selects 3Bxh or 3Dxh as the
+ * I/O address for the CRT Controller registers,
+ * the Feature Control Register (FCR), and Input Status Register
+ * 1 (ST01). Presently ignored (whole range is claimed), but
+ * will "ignore" 3Bx for color configuration or 3Dx for monochrome.
+ * Note that it is typical in AGP chipsets to shadow this bit
+ * and properly steer I/O cycles to the proper bus for operation
+ * where a MDA exists on another bus such as ISA.
+ * 0 = Select 3Bxh I/O address (MDA emulation) (default).
+ * 1 = Select 3Dxh I/O address (CGA emulation).
+ */
+ vga_reg_put8(&regmap, VGA_MSR_WRITE, s3_priv->saveMSR);
+
+ if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* Sequencer registers, don't write SR07 */
+ for (i = 0; i < 7; i++)
+ i915_write_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i,
+ s3_priv->saveSR[i]);
+ /* CRT controller regs */
+ /* Enable CR group 0 writes */
+ i915_write_indexed(&regmap, cr_index, cr_data,
+ 0x11, s3_priv->saveCR[0x11]);
+ for (i = 0; i <= 0x24; i++)
+ i915_write_indexed(&regmap, cr_index,
+ cr_data, i, s3_priv->saveCR[i]);
+
+ /* Graphics controller regs */
+ for (i = 0; i < 9; i++)
+ i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i,
+ s3_priv->saveGR[i]);
+
+ i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+ s3_priv->saveGR[0x10]);
+ i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+ s3_priv->saveGR[0x11]);
+ i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+ s3_priv->saveGR[0x18]);
+
+ /* Attribute controller registers */
+ (void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
+ for (i = 0; i <= 0x14; i++)
+ i915_write_ar(&regmap, st01, i, s3_priv->saveAR[i], 0);
+ (void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
+ vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX | 0x20);
+ (void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
+
+ /* VGA color palette registers */
+ vga_reg_put8(&regmap, VGA_DACMASK, s3_priv->saveDACMASK);
+ /* DACWX automatically increments during writes */
+ vga_reg_put8(&regmap, VGA_DACWX, 0);
+ /* Write 3 bytes of color data to each index */
+ for (i = 0; i < 256 * 3; i++)
+ vga_reg_put8(&regmap, VGA_DACDATA, s3_priv->saveDACDATA[i]);
+}
+
+/**
+ * i915_save_display - save display & mode info
+ * @dev: DRM device
+ *
+ * Save mode timings and display info.
+ */
+void i915_save_display(struct drm_device *dev)
+{
+ struct s3_i915_private *s3_priv = dev->s3_private;
+
+ /* Display arbitration control */
+ s3_priv->saveDSPARB = S3_READ(DSPARB);
+
+ /*
+ * Pipe & plane A info.
+ */
+ s3_priv->savePIPEACONF = S3_READ(PIPEACONF);
+ s3_priv->savePIPEASRC = S3_READ(PIPEASRC);
+ s3_priv->saveFPA0 = S3_READ(FPA0);
+ s3_priv->saveFPA1 = S3_READ(FPA1);
+ s3_priv->saveDPLL_A = S3_READ(DPLL_A);
+ if (IS_I965G(dev))
+ s3_priv->saveDPLL_A_MD = S3_READ(DPLL_A_MD);
+ s3_priv->saveHTOTAL_A = S3_READ(HTOTAL_A);
+ s3_priv->saveHBLANK_A = S3_READ(HBLANK_A);
+ s3_priv->saveHSYNC_A = S3_READ(HSYNC_A);
+ s3_priv->saveVTOTAL_A = S3_READ(VTOTAL_A);
+ s3_priv->saveVBLANK_A = S3_READ(VBLANK_A);
+ s3_priv->saveVSYNC_A = S3_READ(VSYNC_A);
+ s3_priv->saveBCLRPAT_A = S3_READ(BCLRPAT_A);
+
+ s3_priv->saveDSPACNTR = S3_READ(DSPACNTR);
+ s3_priv->saveDSPASTRIDE = S3_READ(DSPASTRIDE);
+ s3_priv->saveDSPASIZE = S3_READ(DSPASIZE);
+ s3_priv->saveDSPAPOS = S3_READ(DSPAPOS);
+ s3_priv->saveDSPABASE = S3_READ(DSPABASE);
+ if (IS_I965G(dev)) {
+ s3_priv->saveDSPASURF = S3_READ(DSPASURF);
+ s3_priv->saveDSPATILEOFF = S3_READ(DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+ s3_priv->savePIPEASTAT = S3_READ(PIPEASTAT);
+
+ /*
+ * Pipe & plane B info
+ */
+ s3_priv->savePIPEBCONF = S3_READ(PIPEBCONF);
+ s3_priv->savePIPEBSRC = S3_READ(PIPEBSRC);
+ s3_priv->saveFPB0 = S3_READ(FPB0);
+ s3_priv->saveFPB1 = S3_READ(FPB1);
+ s3_priv->saveDPLL_B = S3_READ(DPLL_B);
+ if (IS_I965G(dev))
+ s3_priv->saveDPLL_B_MD = S3_READ(DPLL_B_MD);
+ s3_priv->saveHTOTAL_B = S3_READ(HTOTAL_B);
+ s3_priv->saveHBLANK_B = S3_READ(HBLANK_B);
+ s3_priv->saveHSYNC_B = S3_READ(HSYNC_B);
+ s3_priv->saveVTOTAL_B = S3_READ(VTOTAL_B);
+ s3_priv->saveVBLANK_B = S3_READ(VBLANK_B);
+ s3_priv->saveVSYNC_B = S3_READ(VSYNC_B);
+ s3_priv->saveBCLRPAT_B = S3_READ(BCLRPAT_B);
+
+ s3_priv->saveDSPBCNTR = S3_READ(DSPBCNTR);
+ s3_priv->saveDSPBSTRIDE = S3_READ(DSPBSTRIDE);
+ s3_priv->saveDSPBSIZE = S3_READ(DSPBSIZE);
+ s3_priv->saveDSPBPOS = S3_READ(DSPBPOS);
+ s3_priv->saveDSPBBASE = S3_READ(DSPBBASE);
+ if (IS_I965GM(dev) || IS_GM45(dev)) {
+ s3_priv->saveDSPBSURF = S3_READ(DSPBSURF);
+ s3_priv->saveDSPBTILEOFF = S3_READ(DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+ s3_priv->savePIPEBSTAT = S3_READ(PIPEBSTAT);
+
+ /*
+ * CRT state
+ */
+ s3_priv->saveADPA = S3_READ(ADPA);
+
+ /*
+ * LVDS state
+ */
+ s3_priv->savePP_CONTROL = S3_READ(PP_CONTROL);
+ s3_priv->savePFIT_PGM_RATIOS = S3_READ(PFIT_PGM_RATIOS);
+ s3_priv->saveBLC_PWM_CTL = S3_READ(BLC_PWM_CTL);
+ if (IS_I965G(dev))
+ s3_priv->saveBLC_PWM_CTL2 = S3_READ(BLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ s3_priv->saveLVDS = S3_READ(LVDS);
+ if (!IS_I830(dev) && !IS_845G(dev))
+ s3_priv->savePFIT_CONTROL = S3_READ(PFIT_CONTROL);
+ s3_priv->saveLVDSPP_ON = S3_READ(LVDSPP_ON);
+ s3_priv->saveLVDSPP_OFF = S3_READ(LVDSPP_OFF);
+ s3_priv->savePP_CYCLE = S3_READ(PP_CYCLE);
+
+ /* FIXME: save TV & SDVO state */
+
+ /* FBC state */
+ s3_priv->saveFBC_CFB_BASE = S3_READ(FBC_CFB_BASE);
+ s3_priv->saveFBC_LL_BASE = S3_READ(FBC_LL_BASE);
+ s3_priv->saveFBC_CONTROL2 = S3_READ(FBC_CONTROL2);
+ s3_priv->saveFBC_CONTROL = S3_READ(FBC_CONTROL);
+
+ /* VGA state */
+ s3_priv->saveVCLK_DIVISOR_VGA0 = S3_READ(VCLK_DIVISOR_VGA0);
+ s3_priv->saveVCLK_DIVISOR_VGA1 = S3_READ(VCLK_DIVISOR_VGA1);
+ s3_priv->saveVCLK_POST_DIV = S3_READ(VCLK_POST_DIV);
+ s3_priv->saveVGACNTRL = S3_READ(VGACNTRL);
+
+ i915_save_vga(dev);
+}
+
+void i915_restore_display(struct drm_device *dev)
+{
+ struct s3_i915_private *s3_priv = dev->s3_private;
+
+ S3_WRITE(DSPARB, s3_priv->saveDSPARB);
+
+ /*
+ * Pipe & plane A info
+ * Prime the clock
+ */
+ if (s3_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+ S3_WRITE(DPLL_A, s3_priv->saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ drv_usecwait(150);
+ }
+ S3_WRITE(FPA0, s3_priv->saveFPA0);
+ S3_WRITE(FPA1, s3_priv->saveFPA1);
+ /* Actually enable it */
+ S3_WRITE(DPLL_A, s3_priv->saveDPLL_A);
+ drv_usecwait(150);
+ if (IS_I965G(dev))
+ S3_WRITE(DPLL_A_MD, s3_priv->saveDPLL_A_MD);
+ drv_usecwait(150);
+
+ /* Restore mode */
+ S3_WRITE(HTOTAL_A, s3_priv->saveHTOTAL_A);
+ S3_WRITE(HBLANK_A, s3_priv->saveHBLANK_A);
+ S3_WRITE(HSYNC_A, s3_priv->saveHSYNC_A);
+ S3_WRITE(VTOTAL_A, s3_priv->saveVTOTAL_A);
+ S3_WRITE(VBLANK_A, s3_priv->saveVBLANK_A);
+ S3_WRITE(VSYNC_A, s3_priv->saveVSYNC_A);
+ S3_WRITE(BCLRPAT_A, s3_priv->saveBCLRPAT_A);
+
+ /* Restore plane info */
+ S3_WRITE(DSPASIZE, s3_priv->saveDSPASIZE);
+ S3_WRITE(DSPAPOS, s3_priv->saveDSPAPOS);
+ S3_WRITE(PIPEASRC, s3_priv->savePIPEASRC);
+ S3_WRITE(DSPABASE, s3_priv->saveDSPABASE);
+ S3_WRITE(DSPASTRIDE, s3_priv->saveDSPASTRIDE);
+ if (IS_I965G(dev)) {
+ S3_WRITE(DSPASURF, s3_priv->saveDSPASURF);
+ S3_WRITE(DSPATILEOFF, s3_priv->saveDSPATILEOFF);
+ }
+ S3_WRITE(PIPEACONF, s3_priv->savePIPEACONF);
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ S3_WRITE(DSPACNTR, s3_priv->saveDSPACNTR);
+ S3_WRITE(DSPABASE, S3_READ(DSPABASE));
+
+ /* Pipe & plane B info */
+ if (s3_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+ S3_WRITE(DPLL_B, s3_priv->saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ drv_usecwait(150);
+ }
+ S3_WRITE(FPB0, s3_priv->saveFPB0);
+ S3_WRITE(FPB1, s3_priv->saveFPB1);
+ /* Actually enable it */
+ S3_WRITE(DPLL_B, s3_priv->saveDPLL_B);
+ drv_usecwait(150);
+ if (IS_I965G(dev))
+ S3_WRITE(DPLL_B_MD, s3_priv->saveDPLL_B_MD);
+ drv_usecwait(150);
+
+ /* Restore mode */
+ S3_WRITE(HTOTAL_B, s3_priv->saveHTOTAL_B);
+ S3_WRITE(HBLANK_B, s3_priv->saveHBLANK_B);
+ S3_WRITE(HSYNC_B, s3_priv->saveHSYNC_B);
+ S3_WRITE(VTOTAL_B, s3_priv->saveVTOTAL_B);
+ S3_WRITE(VBLANK_B, s3_priv->saveVBLANK_B);
+ S3_WRITE(VSYNC_B, s3_priv->saveVSYNC_B);
+ S3_WRITE(BCLRPAT_B, s3_priv->saveBCLRPAT_B);
+
+ /* Restore plane info */
+ S3_WRITE(DSPBSIZE, s3_priv->saveDSPBSIZE);
+ S3_WRITE(DSPBPOS, s3_priv->saveDSPBPOS);
+ S3_WRITE(PIPEBSRC, s3_priv->savePIPEBSRC);
+ S3_WRITE(DSPBBASE, s3_priv->saveDSPBBASE);
+ S3_WRITE(DSPBSTRIDE, s3_priv->saveDSPBSTRIDE);
+ if (IS_I965G(dev)) {
+ S3_WRITE(DSPBSURF, s3_priv->saveDSPBSURF);
+ S3_WRITE(DSPBTILEOFF, s3_priv->saveDSPBTILEOFF);
+ }
+ S3_WRITE(PIPEBCONF, s3_priv->savePIPEBCONF);
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ S3_WRITE(DSPBCNTR, s3_priv->saveDSPBCNTR);
+ S3_WRITE(DSPBBASE, S3_READ(DSPBBASE));
+
+ /* CRT state */
+ S3_WRITE(ADPA, s3_priv->saveADPA);
+
+ /* LVDS state */
+ if (IS_I965G(dev))
+ S3_WRITE(BLC_PWM_CTL2, s3_priv->saveBLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ S3_WRITE(LVDS, s3_priv->saveLVDS);
+ if (!IS_I830(dev) && !IS_845G(dev))
+ S3_WRITE(PFIT_CONTROL, s3_priv->savePFIT_CONTROL);
+
+ S3_WRITE(PFIT_PGM_RATIOS, s3_priv->savePFIT_PGM_RATIOS);
+ S3_WRITE(BLC_PWM_CTL, s3_priv->saveBLC_PWM_CTL);
+ S3_WRITE(LVDSPP_ON, s3_priv->saveLVDSPP_ON);
+ S3_WRITE(LVDSPP_OFF, s3_priv->saveLVDSPP_OFF);
+ S3_WRITE(PP_CYCLE, s3_priv->savePP_CYCLE);
+ S3_WRITE(PP_CONTROL, s3_priv->savePP_CONTROL);
+
+ /* FIXME: restore TV & SDVO state */
+
+ /* FBC info */
+ S3_WRITE(FBC_CFB_BASE, s3_priv->saveFBC_CFB_BASE);
+ S3_WRITE(FBC_LL_BASE, s3_priv->saveFBC_LL_BASE);
+ S3_WRITE(FBC_CONTROL2, s3_priv->saveFBC_CONTROL2);
+ S3_WRITE(FBC_CONTROL, s3_priv->saveFBC_CONTROL);
+
+ /* VGA state */
+ S3_WRITE(VGACNTRL, s3_priv->saveVGACNTRL);
+ S3_WRITE(VCLK_DIVISOR_VGA0, s3_priv->saveVCLK_DIVISOR_VGA0);
+ S3_WRITE(VCLK_DIVISOR_VGA1, s3_priv->saveVCLK_DIVISOR_VGA1);
+ S3_WRITE(VCLK_POST_DIV, s3_priv->saveVCLK_POST_DIV);
+ drv_usecwait(150);
+
+ i915_restore_vga(dev);
+}
+
+static int
+i915_resume(struct drm_device *dev)
+{
+ ddi_acc_handle_t conf_hdl;
+ struct s3_i915_private *s3_priv = dev->s3_private;
+ int i;
+
+ if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
+ DRM_ERROR(("i915_resume: pci_config_setup fail"));
+ return (DDI_FAILURE);
+ }
+ /*
+ * The nexus driver resumes PCI config space and sets the power state
+ * for its children, so we needn't resume them explicitly here;
+ * see pci_pre_resume for details.
+ */
+ pci_config_put8(conf_hdl, LBB, s3_priv->saveLBB);
+
+ if (IS_I965G(dev) && IS_MOBILE(dev))
+ S3_WRITE(MCHBAR_RENDER_STANDBY, s3_priv->saveRENDERSTANDBY);
+ if (IS_I965GM(dev))
+ (void) S3_READ(MCHBAR_RENDER_STANDBY);
+
+ S3_WRITE(HWS_PGA, s3_priv->saveHWS);
+ if (IS_I965GM(dev))
+ (void) S3_READ(HWS_PGA);
+
+ i915_restore_display(dev);
+
+ /* Clock gating state */
+ S3_WRITE(D_STATE, s3_priv->saveD_STATE);
+ S3_WRITE(CG_2D_DIS, s3_priv->saveCG_2D_DIS);
+
+ /* Cache mode state */
+ S3_WRITE(CACHE_MODE_0, s3_priv->saveCACHE_MODE_0 | 0xffff0000);
+
+ /* Memory arbitration state */
+ S3_WRITE(MI_ARB_STATE, s3_priv->saveMI_ARB_STATE | 0xffff0000);
+
+ for (i = 0; i < 16; i++) {
+ S3_WRITE(SWF0 + (i << 2), s3_priv->saveSWF0[i]);
+ S3_WRITE(SWF10 + (i << 2), s3_priv->saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ S3_WRITE(SWF30 + (i << 2), s3_priv->saveSWF2[i]);
+
+ S3_WRITE(I915REG_PGTBL_CTRL, s3_priv->pgtbl_ctl);
+
+ (void) pci_config_teardown(&conf_hdl);
+
+ drm_agp_rebind(dev);
+
+ return (DDI_SUCCESS);
+}
+
+static int
+i915_suspend(struct drm_device *dev)
+{
+ ddi_acc_handle_t conf_hdl;
+ struct s3_i915_private *s3_priv = dev->s3_private;
+ int i;
+
+ if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
+ DRM_ERROR(("i915_suspend: pci_config_setup fail"));
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * The nexus driver will resume PCI config space for its children,
+ * so PCI config registers are not saved here.
+ */
+ s3_priv->saveLBB = pci_config_get8(conf_hdl, LBB);
+
+ if (IS_I965G(dev) && IS_MOBILE(dev))
+ s3_priv->saveRENDERSTANDBY = S3_READ(MCHBAR_RENDER_STANDBY);
+
+ /* Hardware status page */
+ s3_priv->saveHWS = S3_READ(HWS_PGA);
+
+ i915_save_display(dev);
+
+ /* Interrupt state */
+ s3_priv->saveIIR = S3_READ(IIR);
+ s3_priv->saveIER = S3_READ(IER);
+ s3_priv->saveIMR = S3_READ(IMR);
+
+ /* Clock gating state */
+ s3_priv->saveD_STATE = S3_READ(D_STATE);
+ s3_priv->saveCG_2D_DIS = S3_READ(CG_2D_DIS);
+
+ /* Cache mode state */
+ s3_priv->saveCACHE_MODE_0 = S3_READ(CACHE_MODE_0);
+
+ /* Memory Arbitration state */
+ s3_priv->saveMI_ARB_STATE = S3_READ(MI_ARB_STATE);
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+ s3_priv->saveSWF0[i] = S3_READ(SWF0 + (i << 2));
+ s3_priv->saveSWF1[i] = S3_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+ s3_priv->saveSWF2[i] = S3_READ(SWF30 + (i << 2));
+
+ /*
+ * Save page table control register
+ */
+ s3_priv->pgtbl_ctl = S3_READ(I915REG_PGTBL_CTRL);
+
+ (void) pci_config_teardown(&conf_hdl);
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * This function checks the length of the memory-mapped I/O space to find
+ * the right BAR.  There are two possibilities here:
+ * 1. The MMIO registers are in a memory-mapped I/O BAR of 1M size; the
+ *    bottom half of the 1M space holds the MMIO registers.
+ * 2. The MMIO registers are in a memory-mapped I/O BAR of 512K size; the
+ *    whole 512K space holds the MMIO registers.
+ */
+static int
+i915_map_regs(dev_info_t *dip, caddr_t *save_addr, ddi_acc_handle_t *handlep)
+{
+ int rnumber;
+ int nregs;
+ off_t size = 0;
+
+ if (ddi_dev_nregs(dip, &nregs)) {
+ cmn_err(CE_WARN, "i915_map_regs: failed to get nregs");
+ return (DDI_FAILURE);
+ }
+
+ for (rnumber = 1; rnumber < nregs; rnumber++) {
+ (void) ddi_dev_regsize(dip, rnumber, &size);
+ if ((size == 0x80000) ||
+ (size == 0x100000) ||
+ (size == 0x400000))
+ break;
+ }
+
+ if (rnumber >= nregs) {
+ cmn_err(CE_WARN,
+ "i915_map_regs: failed to find MMIO registers");
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_regs_map_setup(dip, rnumber, save_addr,
+ 0, 0x80000, &s3_attr, handlep)) {
+ cmn_err(CE_WARN,
+ "i915_map_regs: failed to map bar %d", rnumber);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+i915_unmap_regs(ddi_acc_handle_t *handlep)
+{
+ ddi_regs_map_free(handlep);
+}
+
+static int
+i915_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ drm_device_t *statep;
+ s3_i915_private_t *s3_private;
+ void *handle;
+ int unit;
+
+ unit = ddi_get_instance(dip);
+ switch (cmd) {
+ case DDI_ATTACH:
+ break;
+ case DDI_RESUME:
+ statep = ddi_get_soft_state(i915_statep, unit);
+ return (i915_resume(statep));
+ default:
+ DRM_ERROR("i915_attach: attach and resume ops are supported");
+ return (DDI_FAILURE);
+
+ }
+
+ if (ddi_soft_state_zalloc(i915_statep, unit) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "i915_attach: failed to alloc softstate");
+ return (DDI_FAILURE);
+ }
+ statep = ddi_get_soft_state(i915_statep, unit);
+ statep->dip = dip;
+ statep->driver = &i915_driver;
+
+ statep->s3_private = drm_alloc(sizeof(s3_i915_private_t),
+ DRM_MEM_DRIVER);
+
+ if (statep->s3_private == NULL) {
+ cmn_err(CE_WARN, "i915_attach: failed to allocate s3 priv");
+ goto err_exit1;
+ }
+
+ /*
+ * Map in the mmio register space for s3.
+ */
+ s3_private = (s3_i915_private_t *)statep->s3_private;
+
+ if (i915_map_regs(dip, &s3_private->saveAddr,
+ &s3_private->saveHandle)) {
+ cmn_err(CE_WARN, "i915_attach: failed to map MMIO");
+ goto err_exit2;
+ }
+
+ /*
+ * Call drm_supp_register to create minor nodes for us
+ */
+ handle = drm_supp_register(dip, statep);
+ if (handle == NULL) {
+ DRM_ERROR("i915_attach: drm_supp_register failed");
+ goto err_exit3;
+ }
+ statep->drm_handle = handle;
+
+ /*
+ * After drm_supp_register, we can call drm_xxx routine
+ */
+ statep->drm_supported = DRM_UNSUPPORT;
+ if (drm_probe(statep, i915_pciidlist) != DDI_SUCCESS) {
+ DRM_ERROR("i915_open: "
+ "DRM current don't support this graphics card");
+ goto err_exit4;
+ }
+ statep->drm_supported = DRM_SUPPORT;
+
+ /* call common attach code */
+ if (drm_attach(statep) != DDI_SUCCESS) {
+ DRM_ERROR("i915_attach: drm_attach failed");
+ goto err_exit4;
+ }
+ return (DDI_SUCCESS);
+err_exit4:
+ (void) drm_supp_unregister(handle);
+err_exit3:
+ i915_unmap_regs(&s3_private->saveHandle);
+err_exit2:
+ drm_free(statep->s3_private, sizeof(s3_i915_private_t),
+ DRM_MEM_DRIVER);
+err_exit1:
+ (void) ddi_soft_state_free(i915_statep, unit);
+
+ return (DDI_FAILURE);
+
+} /* i915_attach() */
+
+static int
+i915_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ drm_device_t *statep;
+ int unit;
+ s3_i915_private_t *s3_private;
+
+ if ((cmd != DDI_SUSPEND) && (cmd != DDI_DETACH)) {
+ DRM_ERROR("i915_detach: "
+ "only detach and resume ops are supported");
+ return (DDI_FAILURE);
+ }
+
+ unit = ddi_get_instance(dip);
+ statep = ddi_get_soft_state(i915_statep, unit);
+ if (statep == NULL) {
+ DRM_ERROR("i915_detach: can not get soft state");
+ return (DDI_FAILURE);
+ }
+
+ if (cmd == DDI_SUSPEND)
+ return (i915_suspend(statep));
+
+ s3_private = (s3_i915_private_t *)statep->s3_private;
+ ddi_regs_map_free(&s3_private->saveHandle);
+
+ /*
+ * Free the struct for context saving in S3
+ */
+ drm_free(statep->s3_private, sizeof(s3_i915_private_t),
+ DRM_MEM_DRIVER);
+
+ (void) drm_detach(statep);
+ (void) drm_supp_unregister(statep->drm_handle);
+ (void) ddi_soft_state_free(i915_statep, unit);
+
+ return (DDI_SUCCESS);
+
+} /* i915_detach() */
+
+
+/*ARGSUSED*/
+static int
+i915_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+ drm_device_t *statep;
+ int error = DDI_SUCCESS;
+ int unit;
+
+ unit = drm_dev_to_instance((dev_t)arg);
+ switch (infocmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ statep = ddi_get_soft_state(i915_statep, unit);
+ if (statep == NULL || statep->dip == NULL) {
+ error = DDI_FAILURE;
+ } else {
+ *result = (void *) statep->dip;
+ error = DDI_SUCCESS;
+ }
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)(uintptr_t)unit;
+ error = DDI_SUCCESS;
+ break;
+ default:
+ error = DDI_FAILURE;
+ break;
+ }
+ return (error);
+
+} /* i915_info() */
+
+
+static void i915_configure(drm_driver_t *driver)
+{
+ driver->buf_priv_size = 1; /* No dev_priv */
+ driver->load = i915_driver_load;
+ driver->unload = i915_driver_unload;
+ driver->open = i915_driver_open;
+ driver->preclose = i915_driver_preclose;
+ driver->postclose = i915_driver_postclose;
+ driver->lastclose = i915_driver_lastclose;
+ driver->device_is_agp = i915_driver_device_is_agp;
+ driver->enable_vblank = i915_enable_vblank;
+ driver->disable_vblank = i915_disable_vblank;
+ driver->irq_preinstall = i915_driver_irq_preinstall;
+ driver->irq_postinstall = i915_driver_irq_postinstall;
+ driver->irq_uninstall = i915_driver_irq_uninstall;
+ driver->irq_handler = i915_driver_irq_handler;
+
+ driver->gem_init_object = i915_gem_init_object;
+ driver->gem_free_object = i915_gem_free_object;
+
+ driver->driver_ioctls = i915_ioctls;
+ driver->max_driver_ioctl = i915_max_ioctl;
+
+ driver->driver_name = DRIVER_NAME;
+ driver->driver_desc = DRIVER_DESC;
+ driver->driver_date = DRIVER_DATE;
+ driver->driver_major = DRIVER_MAJOR;
+ driver->driver_minor = DRIVER_MINOR;
+ driver->driver_patchlevel = DRIVER_PATCHLEVEL;
+
+ driver->use_agp = 1;
+ driver->require_agp = 1;
+ driver->use_irq = 1;
+}
+
+static int i915_quiesce(dev_info_t *dip)
+{
+ drm_device_t *statep;
+ int unit;
+
+ unit = ddi_get_instance(dip);
+ statep = ddi_get_soft_state(i915_statep, unit);
+ if (statep == NULL) {
+ return (DDI_FAILURE);
+ }
+ i915_driver_irq_uninstall(statep);
+
+ return (DDI_SUCCESS);
+}
diff --git a/usr/src/uts/intel/io/drm/i915_drv.h b/usr/src/uts/intel/io/drm/i915_drv.h
new file mode 100644
index 0000000..ccf4bd6
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_drv.h
@@ -0,0 +1,1842 @@
+/* BEGIN CSTYLED */
+
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _I915_DRV_H
+#define _I915_DRV_H
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
+
+#define DRIVER_NAME "i915"
+#define DRIVER_DESC "Intel Graphics"
+#define DRIVER_DATE "20080730"
+
+#if defined(__SVR4) && defined(__sun)
+#define spinlock_t kmutex_t
+#endif
+
+#define I915_NUM_PIPE 2
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ * - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 6
+#define DRIVER_PATCHLEVEL 0
+
+#if defined(__linux__)
+#define I915_HAVE_FENCE
+#define I915_HAVE_BUFFER
+#endif
+#define I915_HAVE_GEM 1
+
+typedef struct _drm_i915_ring_buffer {
+ int tail_mask;
+ unsigned long Size;
+ u8 *virtual_start;
+ int head;
+ int tail;
+ int space;
+ drm_local_map_t map;
+ struct drm_gem_object *ring_obj;
+} drm_i915_ring_buffer_t;
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ int start;
+ int size;
+ drm_file_t *filp; /* 0: free, -1: heap, other: real files */
+};
+
+typedef struct _drm_i915_vbl_swap {
+ struct list_head head;
+ drm_drawable_t drw_id;
+ unsigned int plane;
+ unsigned int sequence;
+ int flip;
+} drm_i915_vbl_swap_t;
+
+typedef struct s3_i915_private {
+ ddi_acc_handle_t saveHandle;
+ caddr_t saveAddr;
+ uint32_t pgtbl_ctl;
+ uint8_t saveLBB;
+ uint32_t saveDSPACNTR;
+ uint32_t saveDSPBCNTR;
+ uint32_t saveDSPARB;
+ uint32_t saveRENDERSTANDBY;
+ uint32_t saveHWS;
+ uint32_t savePIPEACONF;
+ uint32_t savePIPEBCONF;
+ uint32_t savePIPEASRC;
+ uint32_t savePIPEBSRC;
+ uint32_t saveFPA0;
+ uint32_t saveFPA1;
+ uint32_t saveDPLL_A;
+ uint32_t saveDPLL_A_MD;
+ uint32_t saveHTOTAL_A;
+ uint32_t saveHBLANK_A;
+ uint32_t saveHSYNC_A;
+ uint32_t saveVTOTAL_A;
+ uint32_t saveVBLANK_A;
+ uint32_t saveVSYNC_A;
+ uint32_t saveBCLRPAT_A;
+ uint32_t saveDSPASTRIDE;
+ uint32_t saveDSPASIZE;
+ uint32_t saveDSPAPOS;
+ uint32_t saveDSPABASE;
+ uint32_t saveDSPASURF;
+ uint32_t saveDSPATILEOFF;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+ uint32_t saveBLC_PWM_CTL2;
+ uint32_t saveFPB0;
+ uint32_t saveFPB1;
+ uint32_t saveDPLL_B;
+ uint32_t saveDPLL_B_MD;
+ uint32_t saveHTOTAL_B;
+ uint32_t saveHBLANK_B;
+ uint32_t saveHSYNC_B;
+ uint32_t saveVTOTAL_B;
+ uint32_t saveVBLANK_B;
+ uint32_t saveVSYNC_B;
+ uint32_t saveBCLRPAT_B;
+ uint32_t saveDSPBSTRIDE;
+ uint32_t saveDSPBSIZE;
+ uint32_t saveDSPBPOS;
+ uint32_t saveDSPBBASE;
+ uint32_t saveDSPBSURF;
+ uint32_t saveDSPBTILEOFF;
+ uint32_t saveVCLK_DIVISOR_VGA0;
+ uint32_t saveVCLK_DIVISOR_VGA1;
+ uint32_t saveVCLK_POST_DIV;
+ uint32_t saveVGACNTRL;
+ uint32_t saveADPA;
+ uint32_t saveLVDS;
+ uint32_t saveLVDSPP_ON;
+ uint32_t saveLVDSPP_OFF;
+ uint32_t saveDVOA;
+ uint32_t saveDVOB;
+ uint32_t saveDVOC;
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t save_palette_a[256];
+ uint32_t save_palette_b[256];
+ uint32_t saveFBC_CFB_BASE;
+ uint32_t saveFBC_LL_BASE;
+ uint32_t saveFBC_CONTROL;
+ uint32_t saveFBC_CONTROL2;
+ uint32_t saveIER;
+ uint32_t saveIIR;
+ uint32_t saveIMR;
+ uint32_t saveD_STATE;
+ uint32_t saveCG_2D_DIS;
+ uint32_t saveMI_ARB_STATE;
+ uint32_t savePIPEASTAT;
+ uint32_t savePIPEBSTAT;
+ uint32_t saveCACHE_MODE_0;
+ uint32_t saveSWF0[16];
+ uint32_t saveSWF1[16];
+ uint32_t saveSWF2[3];
+ uint8_t saveMSR;
+ uint8_t saveSR[8];
+ uint8_t saveGR[25];
+ uint8_t saveAR_INDEX;
+ uint8_t saveAR[21];
+ uint8_t saveDACMASK;
+ uint8_t saveDACDATA[256*3]; /* 256 3-byte colors */
+ uint8_t saveCR[37];
+} s3_i915_private_t;
+
+struct drm_i915_error_state {
+ u32 eir;
+ u32 pgtbl_er;
+ u32 pipeastat;
+ u32 pipebstat;
+ u32 ipeir;
+ u32 ipehr;
+ u32 instdone;
+ u32 acthd;
+ u32 instpm;
+ u32 instps;
+ u32 instdone1;
+ u32 seqno;
+ struct timeval time;
+};
+
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ drm_local_map_t *sarea;
+ drm_local_map_t *mmio_map;
+
+ drm_i915_sarea_t *sarea_priv;
+ drm_i915_ring_buffer_t ring;
+
+ drm_dma_handle_t *status_page_dmah;
+ void *hw_status_page;
+ dma_addr_t dma_status_page;
+ uint32_t counter;
+ unsigned int status_gfx_addr;
+ drm_local_map_t hws_map;
+ struct drm_gem_object *hws_obj;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ wait_queue_head_t irq_queue;
+ atomic_t irq_received;
+ /** Protects user_irq_refcount and irq_mask_reg */
+ spinlock_t user_irq_lock;
+ /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
+ int user_irq_refcount;
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ int irq_mask_reg;
+ uint32_t pipestat[2];
+ /** split irq regs for graphics and display engine on IGDNG;
+ irq_mask_reg is still used for display irq. */
+ u32 gt_irq_mask_reg;
+ u32 gt_irq_enable_reg;
+ u32 de_irq_enable_reg;
+
+ int tex_lru_log_granularity;
+ int allow_batchbuffer;
+ struct mem_block *agp_heap;
+ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
+ int vblank_pipe;
+
+ spinlock_t error_lock;
+ struct drm_i915_error_state *first_error;
+
+ struct {
+ struct drm_mm gtt_space;
+
+ drm_local_map_t gtt_mapping;
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ uint32_t next_gem_seqno;
+
+ /**
+ * Waiting sequence number, if any
+ */
+ uint32_t waiting_gem_seqno;
+
+ /**
+ * Last seq seen at irq time
+ */
+ uint32_t irq_gem_seqno;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /**
+ * Flag if the hardware appears to be wedged.
+ *
+ * This is set when attempts to idle the device timeout.
+ * It prevents command submission from occurring and makes
+ * every pending request fail
+ */
+ int wedged;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+ } mm;
+
+} drm_i915_private_t;
+
+struct drm_track {
+ struct drm_track *next, *prev;
+ caddr_t contain_ptr;
+ struct drm_gem_object *obj;
+ uint32_t name;
+ uint64_t offset;
+
+};
+
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head list;
+
+ struct drm_gem_object *obj;
+
+ /** Current space allocated to this object in the GTT, if any. */
+ struct drm_mm_node *gtt_space;
+
+
+ /**
+ * This is set if the object is on the active or flushing lists
+ * (has pending rendering), and is not set if it's on inactive (ready
+ * to be unbound).
+ */
+ int active;
+
+ /**
+ * This is set if the object has been written to since last bound
+ * to the GTT
+ */
+ int dirty;
+
+ /** AGP memory structure for our GTT binding. */
+ int agp_mem;
+
+ caddr_t *page_list;
+
+ pfn_t *pfnarray;
+ /**
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
+ */
+ uint32_t gtt_offset;
+
+ /** Boolean whether this object has a valid gtt offset. */
+ int gtt_bound;
+
+ /** How many users have pinned this object in GTT space */
+ int pin_count;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
+
+ /** Current tiling mode for the object. */
+ uint32_t tiling_mode;
+ uint32_t stride;
+ /**
+ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
+ * GEM_DOMAIN_CPU is not in the object's read domain.
+ */
+ uint8_t *page_cpu_valid;
+ /** User space pin count and filp owning the pin */
+ uint32_t user_pin_count;
+ struct drm_file *pin_filp;
+ /**
+ * Used for checking the object doesn't appear more than once
+ * in an execbuffer object list.
+ */
+ int in_execbuffer;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+ struct list_head list;
+
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** Cache domains that were flushed at the start of the request. */
+ uint32_t flush_domains;
+
+};
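+
+/*
+ * Illustrative sketch only (the real logic lives in
+ * i915_gem_retire_requests() in i915_gem.c): requests are retired in
+ * emission order by comparing the hardware seqno against each queued
+ * request. Assumes the list helpers from drm_linux_list.h; the cast is
+ * safe because the list member is first in the struct. Seqno wraparound
+ * is ignored here for brevity.
+ */
+#if 0
+	while (!list_empty(&dev_priv->mm.request_list)) {
+		struct drm_i915_gem_request *request =
+		    (struct drm_i915_gem_request *)
+		    dev_priv->mm.request_list.next;
+		if (i915_get_gem_seqno(dev) < request->seqno)
+			break;	/* the GPU has not reached this breadcrumb */
+		/* ... move the request's buffers to the inactive list ... */
+	}
+#endif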
+
+struct drm_i915_file_private {
+ struct {
+ uint32_t last_gem_seqno;
+ uint32_t last_gem_throttle_seqno;
+ } mm;
+};
+
+
+enum intel_chip_family {
+ CHIP_I8XX = 0x01,
+ CHIP_I9XX = 0x02,
+ CHIP_I915 = 0x04,
+ CHIP_I965 = 0x08,
+};
+
+extern drm_ioctl_desc_t i915_ioctls[];
+extern int i915_max_ioctl;
+extern void i915_save_display(struct drm_device *dev);
+extern void i915_restore_display(struct drm_device *dev);
+
+ /* i915_dma.c */
+extern void i915_kernel_lost_context(drm_device_t * dev);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *dev);
+extern int i915_driver_open(drm_device_t * dev, drm_file_t *file_priv);
+extern void i915_driver_lastclose(drm_device_t * dev);
+extern void i915_driver_preclose(drm_device_t * dev, drm_file_t *filp);
+extern void i915_driver_postclose(drm_device_t * dev,
+ struct drm_file *file_priv);
+extern int i915_driver_device_is_agp(drm_device_t * dev);
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+extern int i915_emit_box(struct drm_device *dev,
+ struct drm_clip_rect __user *boxes,
+ int i, int DR1, int DR4);
+extern void i915_emit_breadcrumb(struct drm_device *dev);
+extern void i915_emit_mi_flush(drm_device_t *dev, uint32_t flush);
+extern void i915_handle_error(struct drm_device *dev);
+
+/* i915_irq.c */
+extern int i915_irq_emit(DRM_IOCTL_ARGS);
+extern int i915_irq_wait(DRM_IOCTL_ARGS);
+
+extern int i915_enable_vblank(struct drm_device *dev, int crtc);
+extern void i915_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
+extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
+extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
+extern int i915_driver_irq_preinstall(drm_device_t * dev);
+extern void i915_driver_irq_postinstall(drm_device_t * dev);
+extern void i915_driver_irq_uninstall(drm_device_t * dev);
+extern int i915_emit_irq(drm_device_t * dev);
+extern int i915_vblank_swap(DRM_IOCTL_ARGS);
+extern void i915_user_irq_on(drm_device_t * dev);
+extern void i915_user_irq_off(drm_device_t * dev);
+extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
+extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
+
+/* i915_mem.c */
+extern int i915_mem_alloc(DRM_IOCTL_ARGS);
+extern int i915_mem_free(DRM_IOCTL_ARGS);
+extern int i915_mem_init_heap(DRM_IOCTL_ARGS);
+extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
+extern void i915_mem_takedown(struct mem_block **heap);
+extern void i915_mem_release(drm_device_t * dev,
+ drm_file_t *filp, struct mem_block *heap);
+extern struct mem_block **get_heap(drm_i915_private_t *, int);
+extern struct mem_block *find_block_by_proc(struct mem_block *, drm_file_t *);
+extern void mark_block(drm_device_t *, struct mem_block *, int);
+extern void free_block(struct mem_block *);
+
+/* i915_gem.c */
+int i915_gem_init_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_create_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_pread_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_pwrite_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_mmap_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_set_domain_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_sw_finish_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_execbuffer(DRM_IOCTL_ARGS);
+int i915_gem_pin_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_unpin_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_busy_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_throttle_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_entervt_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_leavevt_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_set_tiling(DRM_IOCTL_ARGS);
+int i915_gem_get_tiling(DRM_IOCTL_ARGS);
+int i915_gem_get_aperture_ioctl(DRM_IOCTL_ARGS);
+void i915_gem_load(struct drm_device *dev);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
+int i915_gem_object_unbind(struct drm_gem_object *obj, uint32_t type);
+void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_work_handler(void *dev);
+void i915_gem_clflush_object(struct drm_gem_object *obj);
+int i915_gem_init_ringbuffer(struct drm_device *dev);
+
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+
+/* i915_gem_debug.c */
+void i915_gem_command_decode(uint32_t *data, int count,
+ uint32_t hw_offset, struct drm_device *dev);
+/* i915_gem_regdump.c */
+int i915_reg_dump_show(struct drm_device *dev, void *v);
+#ifdef I915_HAVE_FENCE
+/* i915_fence.c */
+
+
+extern void i915_fence_handler(drm_device_t *dev);
+extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t class,
+ uint32_t flags,
+ uint32_t *sequence,
+ uint32_t *native_type);
+extern void i915_poke_flush(drm_device_t *dev, uint32_t class);
+extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags);
+#endif
+
+#ifdef I915_HAVE_BUFFER
+/* i915_buffer.c */
+extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
+extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type);
+extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
+extern int i915_init_mem_type(drm_device_t *dev, uint32_t type,
+ drm_mem_type_manager_t *man);
+extern uint32_t i915_evict_mask(drm_buffer_object_t *bo);
+extern int i915_move(drm_buffer_object_t *bo, int evict,
+ int no_wait, drm_bo_mem_reg_t *new_mem);
+
+#endif
+
+#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
+#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+#define S3_READ(reg) \
+ *(uint32_t volatile *)((uintptr_t)s3_priv->saveAddr + (reg))
+#define S3_WRITE(reg, val) \
+ *(uint32_t volatile *)((uintptr_t)s3_priv->saveAddr + (reg)) = (val)
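+
+/*
+ * Usage sketch for the accessors above (illustrative; dev_priv must be
+ * in scope, since the macros expand to it):
+ */
+#if 0
+	uint32_t iir = I915_READ(IIR);	/* fetch pending interrupt bits */
+	if (iir != 0)
+		I915_WRITE(IIR, iir);	/* write them back to acknowledge */
+#endif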
+
+#define I915_VERBOSE 0
+#define I915_RING_VALIDATE 0
+
+#if I915_RING_VALIDATE
+void i915_ring_validate(struct drm_device *dev, const char *func, int line);
+#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
+#else
+#define I915_RING_DO_VALIDATE(dev)
+#endif
+
+#define RING_LOCALS unsigned int outring, ringmask, outcount; \
+ volatile unsigned char *virt;
+
+#if I915_VERBOSE
+#define BEGIN_LP_RING(n) do { \
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
+ DRM_DEBUG("dev_priv->ring.virtual_start (%lx)\n", (dev_priv->ring.virtual_start)); \
+ I915_RING_DO_VALIDATE(dev); \
+ if (dev_priv->ring.space < (n)*4) \
+ (void) i915_wait_ring(dev, (n)*4, __FUNCTION__); \
+ outcount = 0; \
+ outring = dev_priv->ring.tail; \
+ ringmask = dev_priv->ring.tail_mask; \
+ virt = dev_priv->ring.virtual_start; \
+} while (*"\0")
+#else
+#define BEGIN_LP_RING(n) do { \
+ I915_RING_DO_VALIDATE(dev); \
+ if (dev_priv->ring.space < (n)*4) \
+ (void) i915_wait_ring(dev, (n)*4, __FUNCTION__); \
+ outcount = 0; \
+ outring = dev_priv->ring.tail; \
+ ringmask = dev_priv->ring.tail_mask; \
+ virt = dev_priv->ring.virtual_start; \
+} while (*"\0")
+#endif
+
+#if I915_VERBOSE
+#define OUT_RING(n) do { \
+ DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
+ *(volatile unsigned int *)(void *)(virt + outring) = (n); \
+ outcount++; \
+ outring += 4; \
+ outring &= ringmask; \
+} while (*"\0")
+#else
+#define OUT_RING(n) do { \
+ *(volatile unsigned int *)(void *)(virt + outring) = (n); \
+ outcount++; \
+ outring += 4; \
+ outring &= ringmask; \
+} while (*"\0")
+#endif
+
+#if I915_VERBOSE
+#define ADVANCE_LP_RING() do { \
+ DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
+ I915_RING_DO_VALIDATE(dev); \
+ dev_priv->ring.tail = outring; \
+ dev_priv->ring.space -= outcount * 4; \
+ I915_WRITE(PRB0_TAIL, outring); \
+} while (*"\0")
+#else
+#define ADVANCE_LP_RING() do { \
+ I915_RING_DO_VALIDATE(dev); \
+ dev_priv->ring.tail = outring; \
+ dev_priv->ring.space -= outcount * 4; \
+ I915_WRITE(PRB0_TAIL, outring); \
+} while (*"\0")
+#endif
+
+extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
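+
+/*
+ * Ring usage sketch (illustrative): emitting a two-dword flush through
+ * the low-priority ring. RING_LOCALS declares the cursor variables the
+ * macros above manipulate; dev and dev_priv are assumed in scope.
+ */
+#if 0
+{
+	RING_LOCALS;
+
+	BEGIN_LP_RING(2);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(0);		/* pad to an even number of dwords */
+	ADVANCE_LP_RING();
+}
+#endif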
+
+/* Extended config space */
+#define LBB 0xf4
+#define GDRST 0xc0
+#define GDRST_FULL (0<<2)
+#define GDRST_RENDER (1<<2)
+#define GDRST_MEDIA (3<<2)
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define VGA_MSR_MEM_EN (1<<1)
+#define VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define VGA_GR_MEM_READ_MODE_SHIFT 3
+#define VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define VGA_GR_MEM_MODE_MASK 0xc
+#define VGA_GR_MEM_MODE_SHIFT 2
+#define VGA_GR_MEM_A0000_AFFFF 0
+#define VGA_GR_MEM_A0000_BFFFF 1
+#define VGA_GR_MEM_B0000_B7FFF 2
+#define VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD (7<<23)
+#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
+
+#define INST_PARSER_CLIENT 0x00000000
+#define INST_OP_FLUSH 0x02000000
+#define INST_FLUSH_MAP_CACHE 0x00000001
+
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+#define MI_USER_INTERRUPT MI_INSTR(2, (0 << 29))
+#define MI_FLUSH (0x04 << 23)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
+
+#define BB1_START_ADDR_MASK (~0x7)
+#define BB1_PROTECTED (1<<0)
+#define BB1_UNPROTECTED (0<<0)
+#define BB2_END_ADDR_MASK (~0x7)
+
+#define I915REG_PGTBL_CTRL 0x2020
+#define IPEIR 0x02088
+#define HWSTAM 0x02098
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define IER 0x020a0
+#define INSTPM 0x020c0
+#define ACTHD 0x020c8
+#define PIPEASTAT 0x70024
+#define PIPEBSTAT 0x71024
+#define ACTHD_I965 0x02074
+#define HWS_PGA 0x02080
+#define IPEIR_I965 0x02064
+#define IPEHR_I965 0x02068
+#define INSTDONE_I965 0x0206c
+#define INSTPS 0x02070 /* 965+ only */
+#define INSTDONE1 0x0207c /* 965+ only */
+#define IPEHR 0x0208c
+#define INSTDONE 0x02090
+#define EIR 0x020b0
+#define EMR 0x020b4
+#define ESR 0x020b8
+#define GM45_ERROR_PAGE_TABLE (1<<5)
+#define GM45_ERROR_MEM_PRIV (1<<4)
+#define I915_ERROR_PAGE_TABLE (1<<4)
+#define GM45_ERROR_CP_PRIV (1<<3)
+#define I915_ERROR_MEMORY_REFRESH (1<<1)
+#define I915_ERROR_INSTRUCTION (1<<0)
+
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define I915_VBLANK_CLEAR (1UL<<1)
+
+#define SRX_INDEX 0x3c4
+#define SRX_DATA 0x3c5
+#define SR01 1
+#define SR01_SCREEN_OFF (1<<5)
+
+#define PPCR 0x61204
+#define PPCR_ON (1<<0)
+
+#define DVOB 0x61140
+#define DVOB_ON (1<<31)
+#define DVOC 0x61160
+#define DVOC_ON (1<<31)
+#define LVDS 0x61180
+#define LVDS_ON (1<<31)
+
+#define ADPA 0x61100
+#define ADPA_DPMS_MASK (~(3<<10))
+#define ADPA_DPMS_ON (0<<10)
+#define ADPA_DPMS_SUSPEND (1<<10)
+#define ADPA_DPMS_STANDBY (2<<10)
+#define ADPA_DPMS_OFF (3<<10)
+
+#ifdef NOPID
+#undef NOPID
+#endif
+#define NOPID 0x2094
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define TAIL_ADDR 0x001FFFF8
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0xFFFFF000
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+#define PGTBL_ER 0x02024
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
+#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
+
+#define MI_BATCH_BUFFER ((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START (0x31<<23)
+#define MI_BATCH_BUFFER_END (0xA<<23)
+#define MI_BATCH_NON_SECURE (1)
+
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+
+#define MI_WAIT_FOR_EVENT ((0x3<<23))
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+
+#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
+
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
+
+#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_BREADCRUMB_INDEX 0x21
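+
+/*
+ * Sketch (illustrative): waiting for the GPU to pass a breadcrumb then
+ * reduces to polling one dword of the status page. DRM_UDELAY is assumed
+ * to be the usual DRM busy-wait helper; wraparound is ignored for brevity.
+ */
+#if 0
+	while (READ_BREADCRUMB(dev_priv) < seqno)
+		DRM_UDELAY(10);
+#endif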
+
+/*
+ * add here for S3 support
+ */
+#define DPLL_A 0x06014
+#define DPLL_B 0x06018
+# define DPLL_VCO_ENABLE 0x80000000 /* (1 << 31) */
+# define DPLL_DVO_HIGH_SPEED (1 << 30)
+# define DPLL_SYNCLOCK_ENABLE (1 << 29)
+# define DPLL_VGA_MODE_DIS (1 << 28)
+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+# define DPLL_MODE_MASK (3 << 26)
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+
+/**
+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+/**
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+# define PLL_REF_INPUT_DREFCLK (0 << 13)
+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+# define PLL_REF_INPUT_MASK (3 << 13)
+# define PLL_LOAD_PULSE_PHASE_SHIFT 9
+
+/* IGDNG */
+#define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
+#define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
+#define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
+#define DPLL_FPA1_P1_POST_DIV_SHIFT 0
+#define DPLL_FPA1_P1_POST_DIV_MASK 0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+
+/**
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+# define SDVO_MULTIPLIER_MASK 0x000000ff
+# define SDVO_MULTIPLIER_SHIFT_HIRES 4
+# define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+/** @defgroup DPLL_MD
+ * @{
+ */
+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD 0x0601c
+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD 0x06020
+/**
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+# define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/**
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.3 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This must be left at the default value (3) or the CRT will not work;
+ * the exact function of this field is not fully understood.
+ */
+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+/** @} */
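+
+/*
+ * Worked example for the multiplier above: a 65 MHz dot clock gives a
+ * 650 MHz bus rate, below the 1 GHz floor, so a 2x multiplier is used
+ * (DPLL at 130 MHz, bus at 1.3 GHz). A sketch of the selection, with the
+ * dot clock in kHz (hypothetical helper, not part of this driver):
+ */
+#if 0
+static int sdvo_pixel_multiplier(int dotclock)
+{
+	if (dotclock >= 100000)
+		return 1;	/* bus rate already at or above 1 GHz */
+	else if (dotclock >= 50000)
+		return 2;
+	else
+		return 4;
+}
+#endif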
+
+#define DPLL_TEST 0x606c
+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+# define DPLLB_TEST_N_BYPASS (1 << 19)
+# define DPLLB_TEST_M_BYPASS (1 << 18)
+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+# define DPLLA_TEST_N_BYPASS (1 << 3)
+# define DPLLA_TEST_M_BYPASS (1 << 2)
+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A 0x0a000
+#define PALETTE_B 0x0a800
+
+/* MCH MMIO space */
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
+ * every way. It is not accessible from the CP register read instructions.
+ *
+ */
+#define MCHBAR_MIRROR_BASE 0x10000
+
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC 0x10200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3 0x10206
+#define C1DRB3 0x10606
+
+/** GM965 GM45 render standby register */
+#define MCHBAR_RENDER_STANDBY 0x111B8
+
+#define FPA0 0x06040
+#define FPA1 0x06044
+#define FPB0 0x06048
+#define FPB1 0x0604c
+
+#define D_STATE 0x6104
+#define CG_2D_DIS 0x6200
+#define CG_3D_DIS 0x6204
+
+#define MI_ARB_STATE 0x20e4
+
+/*
+ * Cache mode 0 reg.
+ * - Manipulating render cache behaviour is central
+ *   to the concept of zone rendering; tuning this reg can help avoid
+ *   unnecessary render cache reads and even writes (for z/stencil)
+ *   at the beginning and end of a scene.
+ *
+ * - To change a bit, write to this reg with the mask bit set and the
+ *   bit of interest either set or cleared, e.g. (BIT<<16) | BIT to set.
+ */
+#define CACHE_MODE_0 0x2120
+
+/* I830 CRTC registers */
+#define HTOTAL_A 0x60000
+#define HBLANK_A 0x60004
+#define HSYNC_A 0x60008
+#define VTOTAL_A 0x6000c
+#define VBLANK_A 0x60010
+#define VSYNC_A 0x60014
+#define PIPEASRC 0x6001c
+#define BCLRPAT_A 0x60020
+#define VSYNCSHIFT_A 0x60028
+
+#define HTOTAL_B 0x61000
+#define HBLANK_B 0x61004
+#define HSYNC_B 0x61008
+#define VTOTAL_B 0x6100c
+#define VBLANK_B 0x61010
+#define VSYNC_B 0x61014
+#define PIPEBSRC 0x6101c
+#define BCLRPAT_B 0x61020
+#define VSYNCSHIFT_B 0x61028
+
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_DISABLE 0
+#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_DISABLE 0
+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_8BPP (0x2<<26)
+#define DISPPLANE_15_16BPP (0x4<<26)
+#define DISPPLANE_16BPP (0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
+#define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+#define DISPPLANE_SEL_PIPE_A 0
+#define DISPPLANE_SEL_PIPE_B (1<<24)
+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE 0
+#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE 0
+#define DISPPLANE_STEREO_POLARITY_FIRST 0
+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+
+#define DSPABASE 0x70184
+#define DSPASTRIDE 0x70188
+
+#define DSPBBASE 0x71184
+#define DSPBADDR DSPBBASE
+#define DSPBSTRIDE 0x71188
+
+#define DSPAKEYVAL 0x70194
+#define DSPAKEYMASK 0x70198
+
+#define DSPAPOS 0x7018C /* reserved */
+#define DSPASIZE 0x70190
+#define DSPBPOS 0x7118C
+#define DSPBSIZE 0x71190
+
+#define DSPASURF 0x7019C
+#define DSPATILEOFF 0x701A4
+
+#define DSPBSURF 0x7119C
+#define DSPBTILEOFF 0x711A4
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE (1UL<<31)
+#define PIPEACONF_DISABLE 0
+#define PIPEACONF_DOUBLE_WIDE (1<<30)
+#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPEACONF_SINGLE_WIDE 0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_PIPE_LOCKED (1<<25)
+#define PIPEACONF_PALETTE 0
+#define PIPEACONF_GAMMA (1<<24)
+#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE (1UL<<31)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_DOUBLE_WIDE (1<<30)
+#define PIPEBCONF_GAMMA (1<<24)
+#define PIPEBCONF_PALETTE 0
+
+#define BLC_PWM_CTL 0x61254
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+
+#define BLC_PWM_CTL2 0x61250
+
+#define PFIT_CONTROL 0x61230
+#define PFIT_PGM_RATIOS 0x61234
+
+/**
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY (1 << 30)
+#define PP_SEQUENCE_NONE (0 << 28)
+#define PP_SEQUENCE_ON (1 << 28)
+#define PP_SEQUENCE_OFF (2 << 28)
+#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CONTROL 0x61204
+#define POWER_TARGET_ON (1 << 0)
+
+#define LVDSPP_ON 0x61208
+#define LVDSPP_OFF 0x6120c
+#define PP_CYCLE 0x61210
+
+#define VGACNTRL 0x71400
+
+#define VCLK_DIVISOR_VGA0 0x6000
+#define VCLK_DIVISOR_VGA1 0x6004
+#define VCLK_POST_DIV 0x6010
+
+/* Framebuffer compression */
+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
+#define FBC_CONTROL 0x03208
+#define FBC_CTL_EN (1<<31)
+#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_INTERVAL_SHIFT (16)
+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_CTL_STRIDE_SHIFT (5)
+#define FBC_CTL_FENCENO (1<<0)
+#define FBC_COMMAND 0x0320c
+#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_STATUS 0x03210
+#define FBC_STAT_COMPRESSING (1<<31)
+#define FBC_STAT_COMPRESSED (1<<30)
+#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_CONTROL2 0x03214
+#define FBC_CTL_FENCE_DBL (0<<4)
+#define FBC_CTL_IDLE_IMM (0<<2)
+#define FBC_CTL_IDLE_FULL (1<<2)
+#define FBC_CTL_IDLE_LINE (2<<2)
+#define FBC_CTL_IDLE_DEBUG (3<<2)
+#define FBC_CTL_CPU_FENCE (1<<1)
+#define FBC_CTL_PLANEA (0<<0)
+#define FBC_CTL_PLANEB (1<<0)
+#define FBC_FENCE_OFF 0x0321b
+
+#define FBC_LL_SIZE (1536)
+#define FBC_LL_PAD (32)
+
+#define DSPARB 0x70030
+
+#define PIPEAFRAMEHIGH 0x70040
+#define PIPEBFRAMEHIGH 0x71040
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+#define PIPEAFRAMEPIXEL 0x70044
+#define PIPEBFRAMEPIXEL 0x71044
+
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+
+/* Interrupt bits:
+ */
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_HWB_OOM_INTERRUPT (1<<13) /* binner out of memory */
+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_USER_INTERRUPT (1<<1)
+#define I915_ASLE_INTERRUPT (1<<0)
+
+#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define I915_CRC_ERROR_ENABLE (1UL<<29)
+#define I915_CRC_DONE_ENABLE (1UL<<28)
+#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
+#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
+#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
+#define I915_DPST_EVENT_ENABLE (1UL<<23)
+#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
+#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
+#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
+#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
+#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
+#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
+#define I915_DPST_EVENT_STATUS (1UL<<7)
+#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
+#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
+#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
+#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
+
+/* GM45+ just has to be different */
+#define PIPEA_FRMCOUNT_GM45 0x70040
+#define PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPEB_FRMCOUNT_GM45 0x71040
+#define PIPEB_FLIPCOUNT_GM45 0x71044
+
+/*
+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+
+#define SWF0 0x71410
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF10 0x70410
+
+#define SWF30 0x72414
+
+/* IGDNG */
+
+#define CPU_VGACNTRL 0x41000
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
+#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
+#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
+#define DIGITAL_PORTA_NO_DETECT (0 << 0)
+#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
+#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL 0x45300
+#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
+#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_BIOS_1 0x46004
+#define FDI_PLL_BIOS_2 0x46008
+#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
+#define DISPLAY_PORT_PLL_BIOS_1 0x46010
+#define DISPLAY_PORT_PLL_BIOS_2 0x46014
+
+#define FDI_PLL_FREQ_CTL 0x46030
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
+#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
+#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
+
+
+#define PIPEA_DATA_M1 0x60030
+#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE_MASK 0x7e000000
+#define PIPEA_DATA_M1_OFFSET 0
+#define PIPEA_DATA_N1 0x60034
+#define PIPEA_DATA_N1_OFFSET 0
+
+#define PIPEA_DATA_M2 0x60038
+#define PIPEA_DATA_M2_OFFSET 0
+#define PIPEA_DATA_N2 0x6003c
+#define PIPEA_DATA_N2_OFFSET 0
+
+#define PIPEA_LINK_M1 0x60040
+#define PIPEA_LINK_M1_OFFSET 0
+#define PIPEA_LINK_N1 0x60044
+#define PIPEA_LINK_N1_OFFSET 0
+
+#define PIPEA_LINK_M2 0x60048
+#define PIPEA_LINK_M2_OFFSET 0
+#define PIPEA_LINK_N2 0x6004c
+#define PIPEA_LINK_N2_OFFSET 0
+
+/* PIPEB timing regs are the same, starting from 0x61000 */
+
+#define PIPEB_DATA_M1 0x61030
+#define PIPEB_DATA_M1_OFFSET 0
+#define PIPEB_DATA_N1 0x61034
+#define PIPEB_DATA_N1_OFFSET 0
+
+#define PIPEB_DATA_M2 0x61038
+#define PIPEB_DATA_M2_OFFSET 0
+#define PIPEB_DATA_N2 0x6103c
+#define PIPEB_DATA_N2_OFFSET 0
+
+#define PIPEB_LINK_M1 0x61040
+#define PIPEB_LINK_M1_OFFSET 0
+#define PIPEB_LINK_N1 0x61044
+#define PIPEB_LINK_N1_OFFSET 0
+
+#define PIPEB_LINK_M2 0x61048
+#define PIPEB_LINK_M2_OFFSET 0
+#define PIPEB_LINK_N2 0x6104c
+#define PIPEB_LINK_N2_OFFSET 0
+
+/* CPU panel fitter */
+#define PFA_CTL_1 0x68080
+#define PFB_CTL_1 0x68880
+#define PF_ENABLE (1<<31)
+
+/* legacy palette */
+#define LGC_PALETTE_A 0x4a000
+#define LGC_PALETTE_B 0x4a800
+
+/* interrupts */
+#define DE_MASTER_IRQ_CONTROL (0x80000000)
+#define DE_SPRITEB_FLIP_DONE (1 << 29)
+#define DE_SPRITEA_FLIP_DONE (1 << 28)
+#define DE_PLANEB_FLIP_DONE (1 << 27)
+#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PCU_EVENT (1 << 25)
+#define DE_GTT_FAULT (1 << 24)
+#define DE_POISON (1 << 23)
+#define DE_PERFORM_COUNTER (1 << 22)
+#define DE_PCH_EVENT (1 << 21)
+#define DE_AUX_CHANNEL_A (1 << 20)
+#define DE_DP_A_HOTPLUG (1 << 19)
+#define DE_GSE (1 << 18)
+#define DE_PIPEB_VBLANK (1 << 15)
+#define DE_PIPEB_EVEN_FIELD (1 << 14)
+#define DE_PIPEB_ODD_FIELD (1 << 13)
+#define DE_PIPEB_LINE_COMPARE (1 << 12)
+#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
+#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPEA_EVEN_FIELD (1 << 6)
+#define DE_PIPEA_ODD_FIELD (1 << 5)
+#define DE_PIPEA_LINE_COMPARE (1 << 4)
+#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+
+#define DEISR 0x44000
+#define DEIMR 0x44004
+#define DEIIR 0x44008
+#define DEIER 0x4400c
+
+/* GT interrupt */
+#define GT_SYNC_STATUS (1 << 2)
+#define GT_USER_INTERRUPT (1 << 0)
+
+#define GTISR 0x44010
+#define GTIMR 0x44014
+#define GTIIR 0x44018
+#define GTIER 0x4401c
+
+/* PCH */
+
+/* south display engine interrupt */
+#define SDE_CRT_HOTPLUG (1 << 11)
+#define SDE_PORTD_HOTPLUG (1 << 10)
+#define SDE_PORTC_HOTPLUG (1 << 9)
+#define SDE_PORTB_HOTPLUG (1 << 8)
+#define SDE_SDVOB_HOTPLUG (1 << 6)
+
+#define SDEISR 0xc4000
+#define SDEIMR 0xc4004
+#define SDEIIR 0xc4008
+#define SDEIER 0xc400c
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG 0xc4030
+#define PORTD_HOTPLUG_ENABLE (1 << 20)
+#define PORTD_PULSE_DURATION_2ms (0)
+#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
+#define PORTD_PULSE_DURATION_6ms (2 << 18)
+#define PORTD_PULSE_DURATION_100ms (3 << 18)
+#define PORTD_HOTPLUG_NO_DETECT (0)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
+#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define PORTC_PULSE_DURATION_2ms (0)
+#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
+#define PORTC_PULSE_DURATION_6ms (2 << 10)
+#define PORTC_PULSE_DURATION_100ms (3 << 10)
+#define PORTC_HOTPLUG_NO_DETECT (0)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
+#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define PORTB_PULSE_DURATION_2ms (0)
+#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
+#define PORTB_PULSE_DURATION_6ms (2 << 2)
+#define PORTB_PULSE_DURATION_100ms (3 << 2)
+#define PORTB_HOTPLUG_NO_DETECT (0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
+
+#define PCH_GPIOA 0xc5010
+#define PCH_GPIOB 0xc5014
+#define PCH_GPIOC 0xc5018
+#define PCH_GPIOD 0xc501c
+#define PCH_GPIOE 0xc5020
+#define PCH_GPIOF 0xc5024
+
+#define PCH_DPLL_A 0xc6014
+#define PCH_DPLL_B 0xc6018
+
+#define PCH_FPA0 0xc6040
+#define PCH_FPA1 0xc6044
+#define PCH_FPB0 0xc6048
+#define PCH_FPB1 0xc604c
+
+#define PCH_DPLL_TEST 0xc606c
+
+#define PCH_DREF_CONTROL 0xC6200
+#define DREF_CONTROL_MASK 0x7fc3
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
+#define DREF_SSC_SOURCE_DISABLE (0<<11)
+#define DREF_SSC_SOURCE_ENABLE (2<<11)
+#define DREF_SSC_SOURCE_MASK (2<<11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
+#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
+#define DREF_NONSPREAD_SOURCE_MASK (2<<9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
+#define DREF_SSC4_DOWNSPREAD (0<<6)
+#define DREF_SSC4_CENTERSPREAD (1<<6)
+#define DREF_SSC1_DISABLE (0<<1)
+#define DREF_SSC1_ENABLE (1<<1)
+#define DREF_SSC4_DISABLE (0)
+#define DREF_SSC4_ENABLE (1)
+
+#define PCH_RAWCLK_FREQ 0xc6204
+#define FDL_TP1_TIMER_SHIFT 12
+#define FDL_TP1_TIMER_MASK (3<<12)
+#define FDL_TP2_TIMER_SHIFT 10
+#define FDL_TP2_TIMER_MASK (3<<10)
+#define RAWCLK_FREQ_MASK 0x3ff
+
+#define PCH_DPLL_TMR_CFG 0xc6208
+
+#define PCH_SSC4_PARMS 0xc6210
+#define PCH_SSC4_AUX_PARMS 0xc6214
+
+/* transcoder */
+
+#define TRANS_HTOTAL_A 0xe0000
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+#define TRANS_HBLANK_A 0xe0004
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+#define TRANS_HSYNC_A 0xe0008
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+#define TRANS_VTOTAL_A 0xe000c
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+#define TRANS_VBLANK_A 0xe0010
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+#define TRANS_VSYNC_A 0xe0014
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+
+#define TRANSA_DATA_M1 0xe0030
+#define TRANSA_DATA_N1 0xe0034
+#define TRANSA_DATA_M2 0xe0038
+#define TRANSA_DATA_N2 0xe003c
+#define TRANSA_DP_LINK_M1 0xe0040
+#define TRANSA_DP_LINK_N1 0xe0044
+#define TRANSA_DP_LINK_M2 0xe0048
+#define TRANSA_DP_LINK_N2 0xe004c
+
+#define TRANS_HTOTAL_B 0xe1000
+#define TRANS_HBLANK_B 0xe1004
+#define TRANS_HSYNC_B 0xe1008
+#define TRANS_VTOTAL_B 0xe100c
+#define TRANS_VBLANK_B 0xe1010
+#define TRANS_VSYNC_B 0xe1014
+
+#define TRANSB_DATA_M1 0xe1030
+#define TRANSB_DATA_N1 0xe1034
+#define TRANSB_DATA_M2 0xe1038
+#define TRANSB_DATA_N2 0xe103c
+#define TRANSB_DP_LINK_M1 0xe1040
+#define TRANSB_DP_LINK_N1 0xe1044
+#define TRANSB_DP_LINK_M2 0xe1048
+#define TRANSB_DP_LINK_N2 0xe104c
+
+#define TRANSACONF 0xf0008
+#define TRANSBCONF 0xf1008
+#define TRANS_DISABLE (0<<31)
+#define TRANS_ENABLE (1<<31)
+#define TRANS_STATE_MASK (1<<30)
+#define TRANS_STATE_DISABLE (0<<30)
+#define TRANS_STATE_ENABLE (1<<30)
+#define TRANS_FSYNC_DELAY_HB1 (0<<27)
+#define TRANS_FSYNC_DELAY_HB2 (1<<27)
+#define TRANS_FSYNC_DELAY_HB3 (2<<27)
+#define TRANS_FSYNC_DELAY_HB4 (3<<27)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_VIDEO_AUDIO (0<<26)
+#define TRANS_PROGRESSIVE (0<<21)
+#define TRANS_8BPC (0<<5)
+#define TRANS_10BPC (1<<5)
+#define TRANS_6BPC (2<<5)
+#define TRANS_12BPC (3<<5)
+
+#define FDI_RXA_CHICKEN 0xc200c
+#define FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
+
+/* CPU: FDI_TX */
+#define FDI_TXA_CTL 0x60100
+#define FDI_TXB_CTL 0x61100
+#define FDI_TX_DISABLE (0<<31)
+#define FDI_TX_ENABLE (1<<31)
+#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
+#define FDI_LINK_TRAIN_NONE (3<<28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+#define FDI_DP_PORT_WIDTH_X1 (0<<19)
+#define FDI_DP_PORT_WIDTH_X2 (1<<19)
+#define FDI_DP_PORT_WIDTH_X3 (2<<19)
+#define FDI_DP_PORT_WIDTH_X4 (3<<19)
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
+/* IGDNG: hardwired to 1 */
+#define FDI_TX_PLL_ENABLE (1<<14)
+/* both Tx and Rx */
+#define FDI_SCRAMBLING_ENABLE (0<<7)
+#define FDI_SCRAMBLING_DISABLE (1<<7)
+
+/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
+#define FDI_RXA_CTL 0xf000c
+#define FDI_RXB_CTL 0xf100c
+#define FDI_RX_ENABLE (1<<31)
+#define FDI_RX_DISABLE (0<<31)
+/* train, dp width same as FDI_TX */
+#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_8BPC (0<<16)
+#define FDI_10BPC (1<<16)
+#define FDI_6BPC (2<<16)
+#define FDI_12BPC (3<<16)
+#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
+#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
+#define FDI_RX_PLL_ENABLE (1<<13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
+#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
+#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
+#define FDI_SEL_RAWCLK (0<<4)
+#define FDI_SEL_PCDCLK (1<<4)
+
+#define FDI_RXA_MISC 0xf0010
+#define FDI_RXB_MISC 0xf1010
+#define FDI_RXA_TUSIZE1 0xf0030
+#define FDI_RXA_TUSIZE2 0xf0038
+#define FDI_RXB_TUSIZE1 0xf1030
+#define FDI_RXB_TUSIZE2 0xf1038
+
+/* FDI_RX interrupt register format */
+#define FDI_RX_INTER_LANE_ALIGN (1<<10)
+#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
+#define FDI_RX_FS_CODE_ERR (1<<6)
+#define FDI_RX_FE_CODE_ERR (1<<5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
+#define FDI_RX_HDCP_LINK_FAIL (1<<3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
+
+#define FDI_RXA_IIR 0xf0014
+#define FDI_RXA_IMR 0xf0018
+#define FDI_RXB_IIR 0xf1014
+#define FDI_RXB_IMR 0xf1018
+
+#define FDI_PLL_CTL_1 0xfe000
+#define FDI_PLL_CTL_2 0xfe004
+
+/* CRT */
+#define PCH_ADPA 0xe1100
+#define ADPA_TRANS_SELECT_MASK (1<<30)
+#define ADPA_TRANS_A_SELECT 0
+#define ADPA_TRANS_B_SELECT (1<<30)
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+
+/* or SDVOB */
+#define HDMIB 0xe1140
+#define PORT_ENABLE (1 << 31)
+#define TRANSCODER_A (0)
+#define TRANSCODER_B (1 << 30)
+#define COLOR_FORMAT_8bpc (0)
+#define COLOR_FORMAT_12bpc (3 << 26)
+#define SDVOB_HOTPLUG_ENABLE (1 << 23)
+#define SDVO_ENCODING (0)
+#define TMDS_ENCODING (2 << 10)
+#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
+#define SDVOB_BORDER_ENABLE (1 << 7)
+#define AUDIO_ENABLE (1 << 6)
+#define VSYNC_ACTIVE_HIGH (1 << 4)
+#define HSYNC_ACTIVE_HIGH (1 << 3)
+#define PORT_DETECTED (1 << 2)
+
+#define HDMIC 0xe1150
+#define HDMID 0xe1160
+
+#define PCH_LVDS 0xe1180
+#define LVDS_DETECTED (1 << 1)
+
+#define BLC_PWM_CPU_CTL2 0x48250
+#define PWM_ENABLE (1 << 31)
+#define PWM_PIPE_A (0 << 29)
+#define PWM_PIPE_B (1 << 29)
+#define BLC_PWM_CPU_CTL 0x48254
+
+#define BLC_PWM_PCH_CTL1 0xc8250
+#define PWM_PCH_ENABLE (1 << 31)
+#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
+#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
+#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
+#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
+
+#define BLC_PWM_PCH_CTL2 0xc8254
+
+#define PCH_PP_STATUS 0xc7200
+#define PCH_PP_CONTROL 0xc7204
+#define EDP_FORCE_VDD (1 << 3)
+#define EDP_BLC_ENABLE (1 << 2)
+#define PANEL_POWER_RESET (1 << 1)
+#define PANEL_POWER_OFF (0 << 0)
+#define PANEL_POWER_ON (1 << 0)
+#define PCH_PP_ON_DELAYS 0xc7208
+#define EDP_PANEL (1 << 30)
+#define PCH_PP_OFF_DELAYS 0xc720c
+#define PCH_PP_DIVISOR 0xc7210
+
+#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
+#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
+#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
+#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
+#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
+#define PCI_DEVICE_ID_INTEL_82946_GZ 0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82Q963_IG 0x2992
+#define PCI_DEVICE_ID_INTEL_82G965_IG 0x29a2
+#define PCI_DEVICE_ID_INTEL_GM965_IG 0x2a02
+#define PCI_DEVICE_ID_INTEL_GME965_IG 0x2a12
+#define PCI_DEVICE_ID_INTEL_82G33_IG 0x29c2
+#define PCI_DEVICE_ID_INTEL_82Q35_IG 0x29b2
+#define PCI_DEVICE_ID_INTEL_82Q33_IG 0x29d2
+#define PCI_DEVICE_ID_INTEL_CANTIGA_IG 0x2a42
+#define PCI_DEVICE_ID_INTEL_EL_IG 0x2e02
+#define PCI_DEVICE_ID_INTEL_82Q45_IG 0x2e12
+#define PCI_DEVICE_ID_INTEL_82G45_IG 0x2e22
+#define PCI_DEVICE_ID_INTEL_82G41_IG 0x2e32
+#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x42
+#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x46
+#define PCI_DEVICE_ID_INTEL_82B43_IG 0x2e42
+
+
+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
+
+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GME_IG)
+
+#define IS_IGDNG_D(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_IGDNG_D_IG)
+#define IS_IGDNG_M(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_IGDNG_M_IG)
+#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+
+#define IS_I965G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82946_GZ || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G35_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q963_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G965_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_GM965_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_GME965_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_CANTIGA_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_EL_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q45_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G45_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82B43_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_IGDNG_D_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_IGDNG_M_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G41_IG)
+
+#define IS_I965GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_GM965_IG)
+
+#define IS_GM45(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_CANTIGA_IG)
+
+#define IS_G4X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_EL_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q45_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G45_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82B43_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G41_IG)
+
+#define IS_G33(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82G33_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q35_IG || \
+ (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q33_IG)
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
+ IS_IGDNG(dev))
+
+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
+ IS_IGDNG_M(dev))
+
+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+
+#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
+ IS_IGDNG(dev))
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
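+/* i.e. on the 945 and later, a Y tile is 32 rows x 128 bytes = one 4 KB page. */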
+
+#endif /* _I915_DRV_H */
diff --git a/usr/src/uts/intel/io/drm/i915_gem.c b/usr/src/uts/intel/io/drm/i915_gem.c
new file mode 100644
index 0000000..5570c2d
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_gem.c
@@ -0,0 +1,2919 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/x86_archext.h>
+#include <sys/vfs_opreg.h>
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#ifndef roundup
+#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
+#endif /* !roundup */
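+/* e.g. roundup(5000, 4096) == 8192: object sizes below are rounded up
+ * to whole pages.
+ */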
+
+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
+static timeout_id_t worktimer_id = NULL;
+
+extern int drm_mm_init(struct drm_mm *mm,
+ unsigned long start, unsigned long size);
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern int choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
+ int vacalign, uint_t flags);
+
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+ int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
+static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+
+/*ARGSUSED*/
+int
+i915_gem_init_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_init args;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_init *) data, sizeof(args));
+
+ spin_lock(&dev->struct_mutex);
+
+ if ((args.gtt_start >= args.gtt_end) ||
+ ((args.gtt_start & (PAGE_SIZE - 1)) != 0) ||
+ ((args.gtt_end & (PAGE_SIZE - 1)) != 0)) {
+ spin_unlock(&dev->struct_mutex);
+ DRM_ERROR("i915_gem_init_ioctel invalid arg 0x%lx args.start 0x%lx end 0x%lx", &args, args.gtt_start, args.gtt_end);
+ return EINVAL;
+ }
+
+ dev->gtt_total = (uint32_t) (args.gtt_end - args.gtt_start);
+
+ (void) drm_mm_init(&dev_priv->mm.gtt_space,
+ (unsigned long) args.gtt_start, dev->gtt_total);
+ DRM_DEBUG("i915_gem_init_ioctl dev->gtt_total %x, dev_priv->mm.gtt_space 0x%x gtt_start 0x%lx", dev->gtt_total, dev_priv->mm.gtt_space, args.gtt_start);
+ ASSERT(dev->gtt_total != 0);
+
+ spin_unlock(&dev->struct_mutex);
+
+
+ return 0;
+}
+
+/*ARGSUSED*/
+int
+i915_gem_get_aperture_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_get_aperture args;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ args.aper_size = (uint64_t)dev->gtt_total;
+ args.aper_available_size = (args.aper_size -
+ atomic_read(&dev->pin_memory));
+
+ ret = DRM_COPY_TO_USER((struct drm_i915_gem_get_aperture __user *) data, &args, sizeof(args));
+
+	if (ret != 0)
+		DRM_ERROR("i915_gem_get_aperture_ioctl error! %d", ret);
+
+	DRM_DEBUG("i915_gem_get_aperture_ioctl: aper_size 0x%x, "
+	    "aper_available_size 0x%lx\n", dev->gtt_total,
+	    (unsigned long)args.aper_available_size);
+
+ return 0;
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+/*ARGSUSED*/
+int
+i915_gem_create_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_create args;
+ struct drm_gem_object *obj;
+ int handlep;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_create *) data, sizeof(args));
+
+
+ args.size = (uint64_t) roundup(args.size, PAGE_SIZE);
+
+ if (args.size == 0) {
+ DRM_ERROR("Invalid obj size %d", args.size);
+ return EINVAL;
+ }
+ /* Allocate the new object */
+ obj = drm_gem_object_alloc(dev, args.size);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to alloc obj");
+ return ENOMEM;
+ }
+
+ ret = drm_gem_handle_create(fpriv, obj, &handlep);
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ args.handle = handlep;
+
+ ret = DRM_COPY_TO_USER((struct drm_i915_gem_create *) data, &args, sizeof(args));
+
+	if (ret != 0)
+		DRM_ERROR("gem create error! %d", ret);
+
+	DRM_DEBUG("i915_gem_create_ioctl handle %d, size 0x%lx, list %p, obj %p",
+	    handlep, (unsigned long)args.size, (void *)&fpriv->object_idr,
+	    (void *)obj);
+
+ return 0;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+/*ARGSUSED*/
+int
+i915_gem_pread_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_pread args;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_pread __user *) data, sizeof(args));
+
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL)
+ return EBADF;
+
+	/* Bounds check source. The check is ordered so that
+	 * args.offset + args.size cannot overflow.
+	 */
+	if (args.size > obj->size ||
+	    args.offset > obj->size - args.size) {
+ drm_gem_object_unreference(obj);
+ DRM_ERROR("i915_gem_pread_ioctl invalid args");
+ return EINVAL;
+ }
+
+ spin_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_set_cpu_read_domain_range(obj, args.offset, args.size);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ DRM_ERROR("pread failed to read domain range ret %d!!!", ret);
+ return EFAULT;
+ }
+
+ unsigned long unwritten = 0;
+ uint32_t *user_data;
+ user_data = (uint32_t *) (uintptr_t) args.data_ptr;
+
+ unwritten = DRM_COPY_TO_USER(user_data, obj->kaddr + args.offset, args.size);
+ if (unwritten) {
+ ret = EFAULT;
+ DRM_ERROR("i915_gem_pread error!!! unwritten %d", unwritten);
+ }
+
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/*ARGSUSED*/
+static int
+i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ uint32_t *user_data;
+ int ret = 0;
+ unsigned long unwritten = 0;
+
+ user_data = (uint32_t *) (uintptr_t) args->data_ptr;
+ spin_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(obj, 0);
+ if (ret) {
+ spin_unlock(&dev->struct_mutex);
+ DRM_ERROR("i915_gem_gtt_pwrite failed to pin ret %d", ret);
+ return ret;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ goto err;
+
+ DRM_DEBUG("obj %d write domain 0x%x read domain 0x%x", obj->name, obj->write_domain, obj->read_domains);
+
+ unwritten = DRM_COPY_FROM_USER(obj->kaddr + args->offset, user_data, args->size);
+ if (unwritten) {
+ ret = EFAULT;
+ DRM_ERROR("i915_gem_gtt_pwrite error!!! unwritten %d", unwritten);
+ goto err;
+ }
+
+err:
+ i915_gem_object_unpin(obj);
+ spin_unlock(&dev->struct_mutex);
+ if (ret)
+ DRM_ERROR("i915_gem_gtt_pwrite error %d", ret);
+ return ret;
+}
+
+/*ARGSUSED*/
+int
+i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+	DRM_ERROR("i915_gem_shmem_pwrite not supported");
+	return ENOTSUP;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+/*ARGSUSED*/
+int
+i915_gem_pwrite_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_pwrite args;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+	DRM_COPYFROM_WITH_RETURN(&args,
+	    (struct drm_i915_gem_pwrite __user *) data, sizeof(args));
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL)
+ return EBADF;
+ obj_priv = obj->driver_private;
+	DRM_DEBUG("i915_gem_pwrite_ioctl, obj->name %d", obj->name);
+
+ /* Bounds check destination.
+ *
+ * XXX: This could use review for overflow issues...
+ */
+ if (args.offset > obj->size || args.size > obj->size ||
+ args.offset + args.size > obj->size) {
+ drm_gem_object_unreference(obj);
+ DRM_ERROR("i915_gem_pwrite_ioctl invalid arg");
+ return EINVAL;
+ }
+
+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
+ * it would end up going through the fenced access, and we'll get
+ * different detiling behavior between reading and writing.
+ * pread/pwrite currently are reading and writing from the CPU
+ * perspective, requiring manual detiling by the client.
+ */
+ if (obj_priv->tiling_mode == I915_TILING_NONE &&
+ dev->gtt_total != 0)
+ ret = i915_gem_gtt_pwrite(dev, obj, &args, fpriv);
+ else
+ ret = i915_gem_shmem_pwrite(dev, obj, &args, fpriv);
+
+ if (ret)
+ DRM_ERROR("pwrite failed %d\n", ret);
+
+ drm_gem_object_unreference(obj);
+
+ return ret;
+}
+
+/**
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
+ */
+/*ARGSUSED*/
+int
+i915_gem_set_domain_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_set_domain args;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_set_domain __user *) data, sizeof(args));
+
+ uint32_t read_domains = args.read_domains;
+ uint32_t write_domain = args.write_domain;
+
+ /* Only handle setting domains to types used by the CPU. */
+ if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ ret = EINVAL;
+
+ if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ ret = EINVAL;
+
+ /* Having something in the write domain implies it's in the read
+ * domain, and only that read domain. Enforce that in the request.
+ */
+ if (write_domain != 0 && read_domains != write_domain)
+ ret = EINVAL;
+ if (ret) {
+ DRM_ERROR("set_domain invalid read or write");
+ return EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL)
+ return EBADF;
+
+ spin_lock(&dev->struct_mutex);
+ DRM_DEBUG("set_domain_ioctl %p(name %d size 0x%x), %08x %08x\n",
+ obj, obj->name, obj->size, args.read_domains, args.write_domain);
+
+ if (read_domains & I915_GEM_DOMAIN_GTT) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+ /* Silently promote "you're not bound, there was nothing to do"
+ * to success, since the client was just asking us to
+ * make sure everything was done.
+ */
+ if (ret == EINVAL)
+ ret = 0;
+ } else {
+ ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ }
+
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ if (ret)
+ DRM_ERROR("i915_set_domain_ioctl ret %d", ret);
+ return ret;
+}
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+/*ARGSUSED*/
+int
+i915_gem_sw_finish_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_sw_finish args;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_sw_finish __user *) data, sizeof(args));
+
+ spin_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL) {
+ spin_unlock(&dev->struct_mutex);
+ return EBADF;
+ }
+
+ DRM_DEBUG("%s: sw_finish %d (%p name %d size 0x%x)\n",
+ __func__, args.handle, obj, obj->name, obj->size);
+
+ obj_priv = obj->driver_private;
+ /* Pinned buffers may be scanout, so flush the cache */
+	if (obj_priv->pin_count)
+		i915_gem_object_flush_cpu_write_domain(obj);
+
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+/*ARGSUSED*/
+int
+i915_gem_mmap_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_mmap args;
+ struct drm_gem_object *obj;
+ caddr_t vvaddr = NULL;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(
+ &args, (struct drm_i915_gem_mmap __user *)data,
+ sizeof (struct drm_i915_gem_mmap));
+
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL)
+ return EBADF;
+
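+	/*
+	 * Map the object's device range into the caller's address space;
+	 * on this port ddi_devmap_segmap() stands in for the Linux
+	 * do_mmap() path.
+	 */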
+ ret = ddi_devmap_segmap(fpriv->dev, (off_t)obj->map->handle,
+ ttoproc(curthread)->p_as, &vvaddr, obj->map->size,
+ PROT_ALL, PROT_ALL, MAP_SHARED, fpriv->credp);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+
+ args.addr_ptr = (uint64_t)(uintptr_t)vvaddr;
+
+ DRM_COPYTO_WITH_RETURN(
+ (struct drm_i915_gem_mmap __user *)data,
+ &args, sizeof (struct drm_i915_gem_mmap));
+
+ return 0;
+}
+
+static void
+i915_gem_object_free_page_list(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ if (obj_priv->page_list == NULL)
+ return;
+
+ kmem_free(obj_priv->page_list,
+ btop(obj->size) * sizeof(caddr_t));
+
+ obj_priv->page_list = NULL;
+}
+
+static void
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* Add a reference if we're newly entering the active list. */
+ if (!obj_priv->active) {
+ drm_gem_object_reference(obj);
+ obj_priv->active = 1;
+ }
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.active_list, (caddr_t)obj_priv);
+ obj_priv->last_rendering_seqno = seqno;
+}
+
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list,
+	    (caddr_t)obj_priv);
+ obj_priv->last_rendering_seqno = 0;
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (obj_priv->pin_count != 0) {
+		list_del_init(&obj_priv->list);
+	} else {
+		list_move_tail(&obj_priv->list,
+		    &dev_priv->mm.inactive_list, (caddr_t)obj_priv);
+	}
+ obj_priv->last_rendering_seqno = 0;
+ if (obj_priv->active) {
+ obj_priv->active = 0;
+ drm_gem_object_unreference(obj);
+ }
+}
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static uint32_t
+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ uint32_t seqno;
+ int was_empty;
+ RING_LOCALS;
+
+ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+ if (request == NULL) {
+ DRM_ERROR("Failed to alloc request");
+ return 0;
+ }
+ /* Grab the seqno we're going to make this request be, and bump the
+ * next (skipping 0 so it can be the reserved no-seqno value).
+ */
+ seqno = dev_priv->mm.next_gem_seqno;
+ dev_priv->mm.next_gem_seqno++;
+ if (dev_priv->mm.next_gem_seqno == 0)
+ dev_priv->mm.next_gem_seqno++;
+
+ DRM_DEBUG("add_request seqno = %d dev 0x%lx", seqno, dev);
+
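+	/*
+	 * Store the seqno into the hardware status page, then emit a user
+	 * interrupt so anyone sleeping on irq_queue is woken to check it.
+	 */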
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+
+ BEGIN_LP_RING(2);
+ OUT_RING(0);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+
+ request->seqno = seqno;
+ request->emitted_jiffies = jiffies;
+ was_empty = list_empty(&dev_priv->mm.request_list);
+	list_add_tail(&request->list, &dev_priv->mm.request_list,
+	    (caddr_t)request);
+
+ /* Associate any objects on the flushing list matching the write
+ * domain we're flushing with our flush.
+ */
+ if (flush_domains != 0) {
+ struct drm_i915_gem_object *obj_priv, *next;
+
+		/* Walk safely: grab each successor before obj_priv moves. */
+		obj_priv = list_entry(dev_priv->mm.flushing_list.next,
+		    struct drm_i915_gem_object, list);
+		next = list_entry(obj_priv->list.next,
+		    struct drm_i915_gem_object, list);
+		for (; &obj_priv->list != &dev_priv->mm.flushing_list;
+		    obj_priv = next, next = list_entry(next->list.next,
+		    struct drm_i915_gem_object, list)) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if ((obj->write_domain & flush_domains) ==
+ obj->write_domain) {
+ obj->write_domain = 0;
+ i915_gem_object_move_to_active(obj, seqno);
+ }
+ }
+
+ }
+
+	if (was_empty && !dev_priv->mm.suspended) {
+		/*
+		 * Fire the retire handler after DRM_HZ ticks via timeout(),
+		 * standing in for Linux's delayed workqueue.
+		 */
+		worktimer_id = timeout(i915_gem_retire_work_handler,
+		    (void *) dev, DRM_HZ);
+		DRM_DEBUG("i915_gem: schedule_delayed_work");
+	}
+ return seqno;
+}
+
+/**
+ * Command execution barrier
+ *
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+uint32_t
+i915_retire_commands(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ uint32_t flush_domains = 0;
+ RING_LOCALS;
+
+ /* The sampler always gets flushed on i965 (sigh) */
+ if (IS_I965G(dev))
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+
+ return flush_domains;
+}
+
+/**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+static void
+i915_gem_retire_request(struct drm_device *dev,
+ struct drm_i915_gem_request *request)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_entry(dev_priv->mm.active_list.next,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ /* If the seqno being retired doesn't match the oldest in the
+ * list, then the oldest in the list must still be newer than
+ * this seqno.
+ */
+ if (obj_priv->last_rendering_seqno != request->seqno)
+ return;
+
+ DRM_DEBUG("%s: retire %d moves to inactive list %p\n",
+ __func__, request->seqno, obj);
+
+ if (obj->write_domain != 0) {
+ i915_gem_object_move_to_flushing(obj);
+ } else {
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+}
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static int
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
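+	/* Signed subtraction keeps this correct across 32-bit wraparound. */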
+ return (int32_t)(seq1 - seq2) >= 0;
+}
+
+uint32_t
+i915_get_gem_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno;
+
+ seqno = i915_get_gem_seqno(dev);
+
+ while (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+ uint32_t retiring_seqno;
+		request = (struct drm_i915_gem_request *)(uintptr_t)
+		    (dev_priv->mm.request_list.next->contain_ptr);
+ retiring_seqno = request->seqno;
+
+ if (i915_seqno_passed(seqno, retiring_seqno) ||
+ dev_priv->mm.wedged) {
+ i915_gem_retire_request(dev, request);
+
+ list_del(&request->list);
+ drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+		} else {
+			break;
+		}
+ }
+}
+
+void
+i915_gem_retire_work_handler(void *device)
+{
+ struct drm_device *dev = (struct drm_device *)device;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ spin_lock(&dev->struct_mutex);
+
+	/* A NULL timer id means the retire timer was cancelled (GEM idle). */
+	if (worktimer_id == NULL) {
+		spin_unlock(&dev->struct_mutex);
+		return;
+	}
+
+ i915_gem_retire_requests(dev);
+	if (!dev_priv->mm.suspended &&
+	    !list_empty(&dev_priv->mm.request_list)) {
+		DRM_DEBUG("i915_gem: schedule_delayed_work");
+		worktimer_id = timeout(i915_gem_retire_work_handler,
+		    (void *) dev, DRM_HZ);
+	}
+ spin_unlock(&dev->struct_mutex);
+}
+
+/**
+ * i965_reset - reset chip after a hang
+ * @dev: drm device to reset
+ * @flags: reset domains
+ *
+ * Reset the chip. Useful if a hang is detected.
+ *
+ * Procedure is fairly simple:
+ * - reset the chip using the reset reg
+ * - re-init context state
+ * - re-init hardware status page
+ * - re-init ring buffer
+ * - re-init interrupt state
+ * - re-init display
+ */
+void i965_reset(struct drm_device *dev, u8 flags)
+{
+ ddi_acc_handle_t conf_hdl;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int timeout = 0;
+ uint8_t gdrst;
+
+ if (flags & GDRST_FULL)
+ i915_save_display(dev);
+
+ if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
+		DRM_ERROR("i965_reset: pci_config_setup fail");
+ return;
+ }
+
+ /*
+ * Set the reset bit, wait for reset, then clear it. Hardware
+ * will clear the status bit (bit 1) when it's actually ready
+ * for action again.
+ */
+ gdrst = pci_config_get8(conf_hdl, GDRST);
+ pci_config_put8(conf_hdl, GDRST, gdrst | flags);
+ drv_usecwait(50);
+	pci_config_put8(conf_hdl, GDRST, gdrst & 0xfe);
+
+	/* ...we don't want to loop forever though; give up after ~500us */
+ do {
+ drv_usecwait(100);
+ gdrst = pci_config_get8(conf_hdl, GDRST);
+ } while ((gdrst & 2) && (timeout++ < 5));
+
+ /* Ok now get things going again... */
+
+ /*
+ * Everything depends on having the GTT running, so we need to start
+ * there. Fortunately we don't need to do this unless we reset the
+ * chip at a PCI level.
+ *
+ * Next we need to restore the context, but we don't use those
+ * yet either...
+ *
+ * Ring buffer needs to be re-initialized in the KMS case, or if X
+ * was running at the time of the reset (i.e. we weren't VT
+ * switched away).
+ */
+ if (!dev_priv->mm.suspended) {
+ drm_i915_ring_buffer_t *ring = &dev_priv->ring;
+ struct drm_gem_object *obj = ring->ring_obj;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ dev_priv->mm.suspended = 0;
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(PRB0_CTL, 0);
+ I915_WRITE(PRB0_TAIL, 0);
+ I915_WRITE(PRB0_HEAD, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+ I915_WRITE(PRB0_CTL,
+ ((obj->size - 4096) & RING_NR_PAGES) |
+ RING_NO_REPORT |
+ RING_VALID);
+ i915_kernel_lost_context(dev);
+
+ (void) drm_irq_install(dev);
+ }
+
+ /*
+ * Display needs restore too...
+ */
+ if (flags & GDRST_FULL)
+ i915_restore_display(dev);
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 ier;
+ int ret = 0;
+
+ ASSERT(seqno != 0);
+
+ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+ if (IS_IGDNG(dev))
+ ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else
+ ier = I915_READ(IER);
+ if (!ier) {
+ DRM_ERROR("something (likely vbetool) disabled "
+ "interrupts, re-enabling\n");
+ (void) i915_driver_irq_preinstall(dev);
+ i915_driver_irq_postinstall(dev);
+ }
+
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ i915_user_irq_on(dev);
+ DRM_WAIT(ret, &dev_priv->irq_queue,
+ (i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+ dev_priv->mm.wedged));
+ i915_user_irq_off(dev);
+ dev_priv->mm.waiting_gem_seqno = 0;
+ }
+ if (dev_priv->mm.wedged) {
+ ret = EIO;
+ }
+
+	/* The GPU may have hung: a timed-out wait (-2 from DRM_WAIT above)
+	 * with the seqno still unsignalled calls for a reset.
+	 */
+	if (ret == -2 && (seqno > i915_get_gem_seqno(dev))) {
+		if (IS_I965G(dev)) {
+			DRM_ERROR("GPU hang detected, trying reset: "
+			    "waited for irq_queue seqno %d, now seqno %d",
+			    seqno, i915_get_gem_seqno(dev));
+			dev_priv->mm.wedged = 1;
+			i965_reset(dev, GDRST_RENDER);
+			i915_gem_retire_requests(dev);
+			dev_priv->mm.wedged = 0;
+		} else {
+			DRM_ERROR("GPU hang detected; reboot required");
+		}
+		return 0;
+	}
+ /* Directly dispatch request retiring. While we have the work queue
+ * to handle this, the waiter on a request often wants an associated
+ * buffer to have made it to the inactive list, and we would need
+ * a separate wait queue to handle that.
+ */
+ if (ret == 0)
+ i915_gem_retire_requests(dev);
+
+ return ret;
+}
+
+static void
+i915_gem_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t cmd;
+ RING_LOCALS;
+
+ DRM_DEBUG("%s: invalidate %08x flush %08x\n", __func__,
+ invalidate_domains, flush_domains);
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+
+ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) {
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (!IS_I965G(dev)) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+ DRM_DEBUG("%s: queue flush %08x to ring\n", __func__, cmd);
+
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ }
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static int
+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+	uint32_t seqno;
+
+ /* This function only exists to support waiting for existing rendering,
+ * not for emitting required flushes.
+ */
+
+	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0) {
+		DRM_ERROR("wait_rendering called with GPU write domain 0x%x",
+		    obj->write_domain);
+		return 0;
+	}
+
+ /* If there is rendering queued on the buffer being evicted, wait for
+ * it.
+ */
+ if (obj_priv->active) {
+ DRM_DEBUG("%s: object %d %p wait for seqno %08x\n",
+ __func__, obj->name, obj, obj_priv->last_rendering_seqno);
+
+ seqno = obj_priv->last_rendering_seqno;
+ if (seqno == 0) {
+			DRM_DEBUG("last rendering may already have finished");
+ return 0;
+ }
+ ret = i915_wait_request(dev, seqno);
+ if (ret != 0) {
+			DRM_ERROR("%s: i915_wait_request request->seqno "
+			    "%d now %d\n", __func__, seqno,
+			    i915_get_gem_seqno(dev));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+int
+i915_gem_object_unbind(struct drm_gem_object *obj, uint32_t type)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret = 0;
+
+ if (obj_priv->gtt_space == NULL)
+ return 0;
+
+ if (obj_priv->pin_count != 0) {
+ DRM_ERROR("Attempting to unbind pinned buffer\n");
+ return EINVAL;
+ }
+
+ /* Wait for any rendering to complete
+ */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret) {
+ DRM_ERROR("wait_rendering failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Move the object to the CPU domain to ensure that
+ * any possible CPU writes while it's not in the GTT
+ * are flushed when we go to remap it. This will
+ * also ensure that all pending GPU writes are finished
+ * before we unbind.
+ */
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret) {
+ DRM_ERROR("set_domain failed: %d\n", ret);
+ return ret;
+ }
+
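+	/*
+	 * agp_mem is zero while pages are bound into the GTT (see
+	 * i915_gem_object_bind_to_gtt()); -1 marks them as unbound.
+	 */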
+ if (!obj_priv->agp_mem) {
+ (void) drm_agp_unbind_pages(dev, obj->size / PAGE_SIZE,
+ obj_priv->gtt_offset, type);
+ obj_priv->agp_mem = -1;
+ }
+
+ ASSERT(!obj_priv->active);
+
+ i915_gem_object_free_page_list(obj);
+
+ if (obj_priv->gtt_space) {
+ atomic_dec(&dev->gtt_count);
+ atomic_sub(obj->size, &dev->gtt_memory);
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ }
+
+ /* Remove ourselves from the LRU list if present. */
+ if (!list_empty(&obj_priv->list))
+ list_del_init(&obj_priv->list);
+
+ return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ for (;;) {
+ /* If there's an inactive buffer available now, grab it
+ * and be done.
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list)) {
+ obj_priv = list_entry(dev_priv->mm.inactive_list.next,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+			ASSERT(obj_priv->pin_count == 0);
+			DRM_DEBUG("%s: evicting %d\n", __func__, obj->name);
+			ASSERT(!obj_priv->active);
+ /* Wait on the rendering and unbind the buffer. */
+ ret = i915_gem_object_unbind(obj, 1);
+ break;
+ }
+ /* If we didn't get anything, but the ring is still processing
+ * things, wait for one of those things to finish and hopefully
+ * leave us a buffer to evict.
+ */
+ if (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_entry(dev_priv->mm.request_list.next,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret) {
+ break;
+ }
+ /* if waiting caused an object to become inactive,
+ * then loop around and wait for it. Otherwise, we
+ * assume that waiting freed and unbound something,
+ * so there should now be some space in the GTT
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ continue;
+ break;
+ }
+
+ /* If we didn't have anything on the request list but there
+ * are buffers awaiting a flush, emit one and try again.
+ * When we wait on it, those buffers waiting for that flush
+ * will get moved to inactive.
+ */
+ if (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_entry(dev_priv->mm.flushing_list.next,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ (void) i915_add_request(dev, obj->write_domain);
+
+ obj = NULL;
+ continue;
+ }
+
+ DRM_ERROR("inactive empty %d request empty %d "
+ "flushing empty %d\n",
+ list_empty(&dev_priv->mm.inactive_list),
+ list_empty(&dev_priv->mm.request_list),
+ list_empty(&dev_priv->mm.flushing_list));
+ /* If we didn't do any of the above, there's nothing to be done
+ * and we just can't fit it in.
+ */
+ return ENOMEM;
+ }
+ return ret;
+}
+
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ int ret;
+
+ for (;;) {
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0)
+ break;
+ }
+ if (ret == ENOMEM)
+ return 0;
+ else
+ DRM_ERROR("evict_everything ret %d", ret);
+ return ret;
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, uint32_t alignment)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_mm_node *free_space;
+ int page_count, ret;
+
+ if (dev_priv->mm.suspended)
+ return EBUSY;
+ if (alignment == 0)
+ alignment = PAGE_SIZE;
+ if (alignment & (PAGE_SIZE - 1)) {
+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+ return EINVAL;
+ }
+
+ if (obj_priv->gtt_space) {
+		DRM_ERROR("Object already bound to the GTT");
+ return 0;
+ }
+search_free:
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ (unsigned long) obj->size, alignment, 0);
+ if (free_space != NULL) {
+ obj_priv->gtt_space = drm_mm_get_block(free_space, (unsigned long) obj->size,
+ alignment);
+ if (obj_priv->gtt_space != NULL) {
+ obj_priv->gtt_space->private = obj;
+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
+ }
+ }
+ if (obj_priv->gtt_space == NULL) {
+ /* If the gtt is empty and we're still having trouble
+ * fitting our object in, we're out of memory.
+ */
+ if (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list)) {
+ DRM_ERROR("GTT full, but LRU list empty\n");
+ return ENOMEM;
+ }
+
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0) {
+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ return ret;
+ }
+ goto search_free;
+ }
+
+ ret = i915_gem_object_get_page_list(obj);
+ if (ret) {
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ DRM_ERROR("bind to gtt failed to get page list");
+ return ret;
+ }
+
+ page_count = obj->size / PAGE_SIZE;
+ /* Create an AGP memory structure pointing at our pages, and bind it
+ * into the GTT.
+ */
+	DRM_DEBUG("Binding object %d of page_count %d at gtt_offset 0x%x "
+	    "obj->pfnarray = 0x%lx",
+	    obj->name, page_count, obj_priv->gtt_offset, obj->pfnarray);
+
+ obj_priv->agp_mem = drm_agp_bind_pages(dev,
+ obj->pfnarray,
+ page_count,
+ obj_priv->gtt_offset);
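+	/* A non-zero return from drm_agp_bind_pages() indicates failure. */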
+ if (obj_priv->agp_mem) {
+ i915_gem_object_free_page_list(obj);
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+		DRM_ERROR("Failed to bind pages obj %d, obj 0x%lx",
+		    obj->name, obj);
+ return ENOMEM;
+ }
+ atomic_inc(&dev->gtt_count);
+ atomic_add(obj->size, &dev->gtt_memory);
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ ASSERT(!(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)));
+ ASSERT(!(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)));
+
+ return 0;
+}
+
+void
+i915_gem_clflush_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* If we don't have a page list set up, then we're not pinned
+ * to GPU, and we can ignore the cache flush because it'll happen
+ * again at bind time.
+ */
+
+ if (obj_priv->page_list == NULL)
+ return;
+ drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+}
+
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ uint32_t seqno;
+
+ if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return;
+
+ /* Queue the GPU write cache flushing we need. */
+ i915_gem_flush(dev, 0, obj->write_domain);
+ seqno = i915_add_request(dev, obj->write_domain);
+ DRM_DEBUG("flush_gpu_write_domain seqno = %d", seqno);
+ obj->write_domain = 0;
+ i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+ if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ return;
+
+ /* No actual flushing is required for the GTT write domain. Writes
+ * to it immediately go to main memory as far as we know, so there's
+ * no chipset flush. It also doesn't land in render cache.
+ */
+ obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ return;
+
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return EINVAL;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0) {
+ DRM_ERROR("set_to_gtt_domain wait_rendering ret %d", ret);
+ return ret;
+ }
+ /* If we're writing through the GTT domain, then CPU and GPU caches
+ * will need to be invalidated at next use.
+ */
+ if (write)
+ obj->read_domains &= I915_GEM_DOMAIN_GTT;
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+	DRM_DEBUG("i915_gem_object_set_to_gtt_domain obj->read_domains %x",
+	    obj->read_domains);
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_GTT) == 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ /* If we have a partially-valid cache of the object in the CPU,
+ * finish invalidating it and free the per-page flags.
+ */
+ i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write) {
+ obj->read_domains &= I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped by GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+
+ DRM_DEBUG("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+ __func__, obj,
+ obj->read_domains, read_domains,
+ obj->write_domain, write_domain);
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (write_domain == 0)
+ read_domains |= obj->read_domains;
+ else
+ obj_priv->dirty = 1;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->write_domain && obj->write_domain != read_domains) {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |= read_domains & ~obj->write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= read_domains & ~obj->read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+ DRM_DEBUG("%s: CPU domain flush %08x invalidate %08x\n",
+ __func__, flush_domains, invalidate_domains);
+ i915_gem_clflush_object(obj);
+ }
+
+ if ((write_domain | flush_domains) != 0)
+ obj->write_domain = write_domain;
+ obj->read_domains = read_domains;
+
+ dev->invalidate_domains |= invalidate_domains;
+ dev->flush_domains |= flush_domains;
+
+ DRM_DEBUG("%s: read %08x write %08x invalidate %08x flush %08x\n",
+ __func__,
+ obj->read_domains, obj->write_domain,
+ dev->invalidate_domains, dev->flush_domains);
+}
+
+/**
+ * Moves the object from a partially CPU read to a full one.
+ *
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
+ */
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (!obj_priv->page_cpu_valid)
+ return;
+
+ /* If we're partially in the CPU read domain, finish moving it in.
+ */
+ if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+ int i;
+
+ for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+ if (obj_priv->page_cpu_valid[i])
+ continue;
+ drm_clflush_pages(obj_priv->page_list + i, 1);
+ }
+ drm_agp_chipset_flush(dev);
+ }
+
+ /* Free the page_cpu_valid mappings which are now stale, whether
+ * or not we've got I915_GEM_DOMAIN_CPU.
+ */
+ drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+ DRM_MEM_DRIVER);
+ obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid. The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i, ret;
+
+ if (offset == 0 && size == obj->size)
+ return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ /* If we're already fully in the CPU read domain, we're done. */
+ if (obj_priv->page_cpu_valid == NULL &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ return 0;
+
+ /* Otherwise, create/clear the per-page CPU read domain flag if we're
+ * newly adding I915_GEM_DOMAIN_CPU
+ */
+ if (obj_priv->page_cpu_valid == NULL) {
+ obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
+ DRM_MEM_DRIVER);
+ if (obj_priv->page_cpu_valid == NULL)
+ return ENOMEM;
+ } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ (void) memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+
+ /* Flush the cache on any pages that are still invalid from the CPU's
+ * perspective.
+ */
+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+ i++) {
+ if (obj_priv->page_cpu_valid[i])
+ continue;
+
+ drm_clflush_pages(obj_priv->page_list + i, 1);
+ obj_priv->page_cpu_valid[i] = 1;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0);
+
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+ return 0;
+}
+
+/**
+ * Pin an object to the GTT and evaluate the relocations landing in it.
+ */
+static int
+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object *entry)
+{
+ struct drm_i915_gem_relocation_entry reloc;
+ struct drm_i915_gem_relocation_entry __user *relocs;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i, ret;
+
+ /* Choose the GTT offset for our buffer and put it there. */
+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+ if (ret) {
+ DRM_ERROR("failed to pin");
+ return ret;
+ }
+ entry->offset = obj_priv->gtt_offset;
+
+ relocs = (struct drm_i915_gem_relocation_entry __user *)
+ (uintptr_t) entry->relocs_ptr;
+ /* Apply the relocations, using the GTT aperture to avoid cache
+ * flushing requirements.
+ */
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_obj_priv;
+ uint32_t reloc_val, reloc_offset, *reloc_entry;
+
+ ret = DRM_COPY_FROM_USER(&reloc, relocs + i, sizeof(reloc));
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+ DRM_ERROR("failed to copy from user");
+ return ret;
+ }
+
+ target_obj = drm_gem_object_lookup(file_priv,
+ reloc.target_handle);
+ if (target_obj == NULL) {
+ i915_gem_object_unpin(obj);
+ return EBADF;
+ }
+ target_obj_priv = target_obj->driver_private;
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_obj_priv->gtt_space == NULL) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc.target_handle);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return EINVAL;
+ }
+
+ if (reloc.offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return EINVAL;
+ }
+ if (reloc.offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return EINVAL;
+ }
+
+ if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.read_domains,
+ reloc.write_domain);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return EINVAL;
+ }
+
+ if (reloc.write_domain && target_obj->pending_write_domain &&
+ reloc.write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.write_domain,
+ target_obj->pending_write_domain);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return EINVAL;
+ }
+ DRM_DEBUG("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc.offset,
+ (int) reloc.target_handle,
+ (int) reloc.read_domains,
+ (int) reloc.write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc.presumed_offset,
+ reloc.delta);
+
+ target_obj->pending_read_domains |= reloc.read_domains;
+ target_obj->pending_write_domain |= reloc.write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+ drm_gem_object_unreference(target_obj);
+ continue;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return EINVAL;
+ }
+
+ /* Map the page containing the relocation we're going to
+ * perform.
+ */
+
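+		/* Patch the dword with the target's GTT address plus delta. */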
+		uint32_t reloc_base = (uint32_t)
+		    (reloc.offset & ~(PAGE_SIZE-1));
+		reloc_offset = reloc.offset & (PAGE_SIZE-1);
+		reloc_entry = (uint32_t *)(uintptr_t)
+		    (obj_priv->page_list[reloc_base/PAGE_SIZE] +
+		    reloc_offset);
+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+ *reloc_entry = reloc_val;
+
+ /* Write the updated presumed offset for this entry back out
+ * to the user.
+ */
+ reloc.presumed_offset = target_obj_priv->gtt_offset;
+ ret = DRM_COPY_TO_USER(relocs + i, &reloc, sizeof(reloc));
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ DRM_ERROR("%s: Failed to copy to user ret %d", __func__, ret);
+ return ret;
+ }
+
+ drm_gem_object_unreference(target_obj);
+ }
+
+ return 0;
+}
+
+/** Dispatch a batchbuffer to the ring
+ */
+static int
+i915_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer *exec,
+ uint64_t exec_offset)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+ (uintptr_t) exec->cliprects_ptr;
+ int nbox = exec->num_cliprects;
+ int i = 0, count;
+ uint64_t exec_start, exec_len;
+ RING_LOCALS;
+
+ exec_start = exec_offset + exec->batch_start_offset;
+ exec_len = exec->batch_len;
+
+	if ((exec_start | exec_len) & 0x7) {
+		DRM_ERROR("batch start/length not 8-byte aligned\n");
+		return EINVAL;
+	}
+
+	if (!exec_start) {
+		DRM_ERROR("batch start address is zero");
+		return EINVAL;
+	}
+
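+	/* With no cliprects, dispatch the batch buffer exactly once. */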
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, boxes, i,
+ exec->DR1, exec->DR4);
+ if (ret) {
+				DRM_ERROR("i915_emit_box %d DR1 0x%lx "
+				    "DR4 0x%lx", ret, exec->DR1, exec->DR4);
+ return ret;
+ }
+ }
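+		/*
+		 * 830/845 need the legacy MI_BATCH_BUFFER command with an
+		 * explicit end address; later chips branch to the buffer
+		 * with MI_BATCH_BUFFER_START.
+		 */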
+ if (IS_I830(dev) || IS_845G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ OUT_RING(exec_start + exec_len - 4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(2);
+ if (IS_I965G(dev)) {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6) |
+ (3 << 9) |
+ MI_BATCH_NON_SECURE_I965);
+ OUT_RING(exec_start);
+
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6));
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ }
+ ADVANCE_LP_RING();
+ }
+ }
+ /* XXX breadcrumb */
+ return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ int ret = 0;
+ uint32_t seqno;
+
+ spin_lock(&dev->struct_mutex);
+ seqno = i915_file_priv->mm.last_gem_throttle_seqno;
+ i915_file_priv->mm.last_gem_throttle_seqno =
+ i915_file_priv->mm.last_gem_seqno;
+ if (seqno) {
+ ret = i915_wait_request(dev, seqno);
+		if (ret != 0)
+			DRM_ERROR("%s: i915_wait_request request->seqno "
+			    "%d now %d\n", __func__, seqno,
+			    i915_get_gem_seqno(dev));
+ }
+ spin_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/*ARGSUSED*/
+int
+i915_gem_execbuffer(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *i915_file_priv = fpriv->driver_priv;
+ struct drm_i915_gem_execbuffer args;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_gem_object **object_list = NULL;
+ struct drm_gem_object *batch_obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0, i, pinned = 0;
+ uint64_t exec_offset;
+ uint32_t seqno, flush_domains;
+ int pin_tries;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_execbuffer __user *) data, sizeof(args));
+
+ DRM_DEBUG("buffer_count %d len %x\n", args.buffer_count, args.batch_len);
+
+ if (args.buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args.buffer_count);
+ return EINVAL;
+ }
+ /* Copy in the exec list from userland */
+ exec_list = drm_calloc(sizeof(*exec_list), args.buffer_count,
+ DRM_MEM_DRIVER);
+ object_list = drm_calloc(sizeof(*object_list), args.buffer_count,
+ DRM_MEM_DRIVER);
+ if (exec_list == NULL || object_list == NULL) {
+ DRM_ERROR("Failed to allocate exec or object list "
+ "for %d buffers\n",
+ args.buffer_count);
+ ret = ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ ret = DRM_COPY_FROM_USER(exec_list,
+ (struct drm_i915_gem_exec_object __user *)
+ (uintptr_t) args.buffers_ptr,
+ sizeof(*exec_list) * args.buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args.buffer_count, ret);
+ goto pre_mutex_err;
+ }
+ spin_lock(&dev->struct_mutex);
+
+ if (dev_priv->mm.wedged) {
+ DRM_ERROR("Execbuf while wedged\n");
+ spin_unlock(&dev->struct_mutex);
+ return EIO;
+ }
+
+ if (dev_priv->mm.suspended) {
+ DRM_ERROR("Execbuf while VT-switched.\n");
+ spin_unlock(&dev->struct_mutex);
+ return EBUSY;
+ }
+
+ /* Look up object handles */
+ for (i = 0; i < args.buffer_count; i++) {
+ object_list[i] = drm_gem_object_lookup(fpriv,
+ exec_list[i].handle);
+ if (object_list[i] == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec_list[i].handle, i);
+ ret = EBADF;
+ goto err;
+ }
+ obj_priv = object_list[i]->driver_private;
+ if (obj_priv->in_execbuffer) {
+			DRM_ERROR("Object[%d] (%d) %p appears more than "
+			    "once in object list (buffer_count %d)\n",
+			    i, object_list[i]->name, object_list[i],
+			    args.buffer_count);
+
+ ret = EBADF;
+ goto err;
+ }
+
+ obj_priv->in_execbuffer = 1;
+ }
+
+ /* Pin and relocate */
+ for (pin_tries = 0; ; pin_tries++) {
+ ret = 0;
+ for (i = 0; i < args.buffer_count; i++) {
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ fpriv,
+ &exec_list[i]);
+ if (ret) {
+				DRM_ERROR("Not all objects pinned");
+ break;
+ }
+ pinned = i + 1;
+ }
+		/* success */
+		if (ret == 0) {
+			DRM_DEBUG("gem_execbuffer pin_relocate success");
+			break;
+		}
+ /* error other than GTT full, or we've already tried again */
+ if (ret != ENOMEM || pin_tries >= 1) {
+ if (ret != ERESTART)
+ DRM_ERROR("Failed to pin buffers %d\n", ret);
+ goto err;
+ }
+
+ /* unpin all of our buffers */
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+ pinned = 0;
+
+ /* evict everyone we can from the aperture */
+ ret = i915_gem_evict_everything(dev);
+ if (ret)
+ goto err;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ batch_obj = object_list[args.buffer_count-1];
+ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+ batch_obj->pending_write_domain = 0;
+
+	/* Zero the global flush/invalidate flags. These
+ * will be modified as each object is bound to the
+ * gtt
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
+ for (i = 0; i < args.buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+
+ /* Compute new gpu domains and update invalidate/flush */
+ i915_gem_object_set_to_gpu_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
+ }
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+		DRM_DEBUG("%s: invalidate_domains %08x flush_domains %08x\n",
+		    __func__,
+		    dev->invalidate_domains,
+		    dev->flush_domains);
+		i915_gem_flush(dev,
+		    dev->invalidate_domains,
+		    dev->flush_domains);
+		if (dev->flush_domains)
+			(void) i915_add_request(dev, dev->flush_domains);
+	}
+
+ for (i = 0; i < args.buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+
+ obj->write_domain = obj->pending_write_domain;
+ }
+
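+	/* The batch buffer is always the last object in the exec list. */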
+ exec_offset = exec_list[args.buffer_count - 1].offset;
+
+ /* Exec the batchbuffer */
+ ret = i915_dispatch_gem_execbuffer(dev, &args, exec_offset);
+ if (ret) {
+ DRM_ERROR("dispatch failed %d\n", ret);
+ goto err;
+ }
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires
+ */
+ flush_domains = i915_retire_commands(dev);
+
+ /*
+ * Get a seqno representing the execution of the current buffer,
+ * which we can wait on. We would like to mitigate these interrupts,
+ * likely by only creating seqnos occasionally (so that we have
+ * *some* interrupts representing completion of buffers that we can
+ * wait on when trying to clear up gtt space).
+ */
+ seqno = i915_add_request(dev, flush_domains);
+	ASSERT(seqno != 0);
+ i915_file_priv->mm.last_gem_seqno = seqno;
+ for (i = 0; i < args.buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ i915_gem_object_move_to_active(obj, seqno);
+ DRM_DEBUG("%s: move to exec list %p\n", __func__, obj);
+ }
+
+err:
+ if (object_list != NULL) {
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+
+ for (i = 0; i < args.buffer_count; i++) {
+ if (object_list[i]) {
+ obj_priv = object_list[i]->driver_private;
+ obj_priv->in_execbuffer = 0;
+ }
+ drm_gem_object_unreference(object_list[i]);
+ }
+ }
+ spin_unlock(&dev->struct_mutex);
+
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = DRM_COPY_TO_USER((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args.buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args.buffer_count);
+ if (ret)
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args.buffer_count, ret);
+ }
+
+pre_mutex_err:
+ drm_free(object_list, sizeof(*object_list) * args.buffer_count,
+ DRM_MEM_DRIVER);
+ drm_free(exec_list, sizeof(*exec_list) * args.buffer_count,
+ DRM_MEM_DRIVER);
+
+ return ret;
+}
+
+int
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ if (obj_priv->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment);
+ if (ret != 0) {
+ DRM_ERROR("Failure to bind: %d", ret);
+ return ret;
+ }
+ }
+ obj_priv->pin_count++;
+
+ /* If the object is not active and not pending a flush,
+ * remove it from the inactive list
+ */
+ if (obj_priv->pin_count == 1) {
+ atomic_inc(&dev->pin_count);
+ atomic_add(obj->size, &dev->pin_memory);
+ if (!obj_priv->active &&
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) == 0 &&
+ !list_empty(&obj_priv->list))
+ list_del_init(&obj_priv->list);
+ }
+ return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ obj_priv->pin_count--;
+	ASSERT(obj_priv->pin_count >= 0);
+	ASSERT(obj_priv->gtt_space != NULL);
+
+ /* If the object is no longer pinned, and is
+ * neither active nor being flushed, then stick it on
+ * the inactive list
+ */
+ if (obj_priv->pin_count == 0) {
+ if (!obj_priv->active &&
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) == 0)
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.inactive_list, (caddr_t)obj_priv);
+ atomic_dec(&dev->pin_count);
+ atomic_sub(obj->size, &dev->pin_memory);
+ }
+}
+
+/*ARGSUSED*/
+int
+i915_gem_pin_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_pin args;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_pin __user *) data, sizeof(args));
+
+ spin_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+ args.handle);
+ spin_unlock(&dev->struct_mutex);
+ return EBADF;
+ }
+ DRM_DEBUG("i915_gem_pin_ioctl obj->name %d", obj->name);
+ obj_priv = obj->driver_private;
+
+ if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != fpriv) {
+ DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+ args.handle);
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return EINVAL;
+ }
+
+ obj_priv->user_pin_count++;
+ obj_priv->pin_filp = fpriv;
+ if (obj_priv->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args.alignment);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ /* XXX - flush the CPU caches for pinned objects
+ * as the X server doesn't manage domains yet
+ */
+ i915_gem_object_flush_cpu_write_domain(obj);
+ args.offset = obj_priv->gtt_offset;
+
+	ret = DRM_COPY_TO_USER((struct drm_i915_gem_pin __user *) data,
+	    &args, sizeof(args));
+	if (ret != 0)
+		DRM_ERROR("i915_gem_pin_ioctl: copyout failed %d", ret);
+
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/*ARGSUSED*/
+int
+i915_gem_unpin_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_pin args;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_pin __user *) data, sizeof(args));
+
+ spin_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+ args.handle);
+ spin_unlock(&dev->struct_mutex);
+ return EBADF;
+ }
+ obj_priv = obj->driver_private;
+ DRM_DEBUG("i915_gem_unpin_ioctl, obj->name %d", obj->name);
+ if (obj_priv->pin_filp != fpriv) {
+		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
+ args.handle);
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return EINVAL;
+ }
+ obj_priv->user_pin_count--;
+ if (obj_priv->user_pin_count == 0) {
+ obj_priv->pin_filp = NULL;
+ i915_gem_object_unpin(obj);
+ }
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+/*ARGSUSED*/
+int
+i915_gem_busy_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_busy args;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_busy __user *) data, sizeof(args));
+
+ spin_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+ args.handle);
+ spin_unlock(&dev->struct_mutex);
+ return EBADF;
+ }
+
+ obj_priv = obj->driver_private;
+ /* Don't count being on the flushing list against the object being
+ * done. Otherwise, a buffer left on the flushing list but not getting
+ * flushed (because nobody's flushing that domain) won't ever return
+ * unbusy and get reused by libdrm's bo cache. The other expected
+ * consumer of this interface, OpenGL's occlusion queries, also specs
+ * that the objects get unbusy "eventually" without any interference.
+ */
+ args.busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
+ DRM_DEBUG("i915_gem_busy_ioctl call obj->name %d busy %d", obj->name, args.busy);
+
+	ret = DRM_COPY_TO_USER((struct drm_i915_gem_busy __user *) data,
+	    &args, sizeof(args));
+	if (ret != 0)
+		DRM_ERROR("gem busy ioctl: copyout error %d", ret);
+
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+/*ARGSUSED*/
+int
+i915_gem_throttle_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ return i915_gem_ring_throttle(dev, fpriv);
+}
+
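+/*
+ * Populate the object's page list by walking its contiguous kernel
+ * mapping one page at a time; entry i points at page i of the object.
+ */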
+static int
+i915_gem_object_get_page_list(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ caddr_t va;
+	long i;
+	pgcnt_t np;
+
+ if (obj_priv->page_list)
+ return 0;
+	np = btop(obj->size);
+
+ obj_priv->page_list = kmem_zalloc(np * sizeof(caddr_t), KM_SLEEP);
+ if (obj_priv->page_list == NULL) {
+		DRM_ERROR("Failed to allocate page list\n");
+ return ENOMEM;
+ }
+
+ for (i = 0, va = obj->kaddr; i < np; i++, va += PAGESIZE) {
+ obj_priv->page_list[i] = va;
+ }
+ return 0;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+ if (obj_priv == NULL)
+ return ENOMEM;
+
+ /*
+ * We've just allocated pages from the kernel,
+ * so they've just been written by the CPU with
+ * zeros. They'll need to be clflushed before we
+ * use them with the GPU.
+ */
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+ obj->driver_private = obj_priv;
+ obj_priv->obj = obj;
+ INIT_LIST_HEAD(&obj_priv->list);
+ return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ while (obj_priv->pin_count > 0)
+ i915_gem_object_unpin(obj);
+
+	DRM_DEBUG("%s: obj %d", __func__, obj->name);
+
+ (void) i915_gem_object_unbind(obj, 1);
+ if (obj_priv->page_cpu_valid != NULL)
+ drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, DRM_MEM_DRIVER);
+ drm_free(obj->driver_private, sizeof(*obj_priv), DRM_MEM_DRIVER);
+}
+
+/** Unbinds all objects that are on the given buffer list. */
+static int
+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head, uint32_t type)
+{
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ while (!list_empty(head)) {
+ obj_priv = list_entry(head->next,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ if (obj_priv->pin_count != 0) {
+ DRM_ERROR("Pinned object in unbind list\n");
+ spin_unlock(&dev->struct_mutex);
+ return EINVAL;
+ }
+		DRM_DEBUG("%s: obj %d type %d", __func__, obj->name, type);
+ ret = i915_gem_object_unbind(obj, type);
+ if (ret != 0) {
+ DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
+ ret);
+ spin_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_idle(struct drm_device *dev, uint32_t type)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno, cur_seqno, last_seqno;
+ int stuck, ret;
+
+ spin_lock(&dev->struct_mutex);
+
+ if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+ spin_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ */
+ dev_priv->mm.suspended = 1;
+
+ /* Cancel the retire work handler, wait for it to finish if running
+ */
+ if (worktimer_id != NULL) {
+ (void) untimeout(worktimer_id);
+ worktimer_id = NULL;
+ }
+
+ i915_kernel_lost_context(dev);
+
+	/* Flush the GPU along with all write domains other than CPU and GTT
+	 */
+ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
+ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT));
+ if (seqno == 0) {
+ spin_unlock(&dev->struct_mutex);
+ return ENOMEM;
+ }
+
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ last_seqno = 0;
+ stuck = 0;
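+	/*
+	 * Poll for the seqno to be retired; if it makes no forward
+	 * progress for ~100 consecutive 10us checks, declare the
+	 * hardware wedged and wake any waiters.
+	 */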
+ for (;;) {
+ cur_seqno = i915_get_gem_seqno(dev);
+ if (i915_seqno_passed(cur_seqno, seqno))
+ break;
+ if (last_seqno == cur_seqno) {
+ if (stuck++ > 100) {
+ DRM_ERROR("hardware wedged\n");
+ dev_priv->mm.wedged = 1;
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ break;
+ }
+ }
+ DRM_UDELAY(10);
+ last_seqno = cur_seqno;
+ }
+ dev_priv->mm.waiting_gem_seqno = 0;
+
+ i915_gem_retire_requests(dev);
+
+ /* Empty the active and flushing lists to inactive. If there's
+ * anything left at this point, it means that we're wedged and
+ * nothing good's going to happen by leaving them there. So strip
+ * the GPU domains and just stuff them onto inactive.
+ */
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_entry(dev_priv->mm.active_list.next,
+ struct drm_i915_gem_object,
+ list);
+ obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj_priv->obj);
+ }
+
+ while (!list_empty(&dev_priv->mm.flushing_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_entry(dev_priv->mm.flushing_list.next,
+ struct drm_i915_gem_object,
+ list);
+ obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj_priv->obj);
+ }
+
+ /* Move all inactive buffers out of the GTT. */
+ ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list, type);
+ ASSERT(list_empty(&dev_priv->mm.inactive_list));
+ if (ret) {
+ spin_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ i915_gem_cleanup_ringbuffer(dev);
+ spin_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
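+/*
+ * Allocate and pin a 4KB object for the hardware status page, map it,
+ * and point HWS_PGA at its GTT address. Only needed on chipsets that
+ * require a GTT-based (rather than physical) status page.
+ */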
+static int
+i915_gem_init_hws(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ /* If we need a physical address for the status page, it's already
+ * initialized at driver load time.
+ */
+ if (!I915_NEED_GFX_HWS(dev))
+ return 0;
+
+ obj = drm_gem_object_alloc(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return ENOMEM;
+ }
+
+ obj_priv = obj->driver_private;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ return ret;
+ }
+
+ dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->hws_map.offset = dev->agp->agp_info.agpi_aperbase + obj_priv->gtt_offset;
+ dev_priv->hws_map.size = 4096;
+ dev_priv->hws_map.type = 0;
+ dev_priv->hws_map.flags = 0;
+ dev_priv->hws_map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.handle == NULL) {
+ DRM_ERROR("Failed to map status page.\n");
+ (void) memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+ drm_gem_object_unreference(obj);
+ return EINVAL;
+ }
+
+ dev_priv->hws_obj = obj;
+
+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
+
+ (void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ (void) I915_READ(HWS_PGA); /* posting read */
+ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+
+ return 0;
+}
+
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+
+ if (dev_priv->hws_obj == NULL)
+ return;
+
+ obj = dev_priv->hws_obj;
+
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ dev_priv->hws_obj = NULL;
+
+ (void) memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+ dev_priv->hw_status_page = NULL;
+
+ /* Write high address into HWS_PGA when disabling. */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
+int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+ u32 head;
+
+ ret = i915_gem_init_hws(dev);
+ if (ret != 0)
+ return ret;
+ obj = drm_gem_object_alloc(dev, 128 * 1024);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ i915_gem_cleanup_hws(dev);
+ return ENOMEM;
+ }
+
+ obj_priv = obj->driver_private;
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ i915_gem_cleanup_hws(dev);
+ return ret;
+ }
+
+ /* Set up the kernel mapping for the ring. */
+ dev_priv->ring.Size = obj->size;
+ dev_priv->ring.tail_mask = obj->size - 1;
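+	/* Note: the tail mask math assumes the ring size is a power of two. */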
+
+ dev_priv->ring.map.offset = dev->agp->agp_info.agpi_aperbase + obj_priv->gtt_offset;
+ dev_priv->ring.map.size = obj->size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->ring.map, dev);
+ if (dev_priv->ring.map.handle == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ (void) memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+ drm_gem_object_unreference(obj);
+ i915_gem_cleanup_hws(dev);
+ return EINVAL;
+ }
+
+ dev_priv->ring.ring_obj = obj;
+
+ dev_priv->ring.virtual_start = (u8 *) dev_priv->ring.map.handle;
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(PRB0_CTL, 0);
+ I915_WRITE(PRB0_HEAD, 0);
+ I915_WRITE(PRB0_TAIL, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_ERROR("Ring head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ I915_READ(PRB0_CTL),
+ I915_READ(PRB0_HEAD),
+ I915_READ(PRB0_TAIL),
+ I915_READ(PRB0_START));
+ I915_WRITE(PRB0_HEAD, 0);
+
+ DRM_ERROR("Ring head forced to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ I915_READ(PRB0_CTL),
+ I915_READ(PRB0_HEAD),
+ I915_READ(PRB0_TAIL),
+ I915_READ(PRB0_START));
+ }
+
+ I915_WRITE(PRB0_CTL,
+ ((obj->size - 4096) & RING_NR_PAGES) |
+ RING_NO_REPORT |
+ RING_VALID);
+
+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+
+ /* If the head is still not zero, the ring is dead */
+ if (head != 0) {
+ DRM_ERROR("Ring initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ I915_READ(PRB0_CTL),
+ I915_READ(PRB0_HEAD),
+ I915_READ(PRB0_TAIL),
+ I915_READ(PRB0_START));
+ return EIO;
+ }
+
+ /* Update our cache of the ring state */
+ i915_kernel_lost_context(dev);
+
+ return 0;
+}
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->ring.ring_obj == NULL)
+ return;
+
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+
+ i915_gem_object_unpin(dev_priv->ring.ring_obj);
+ drm_gem_object_unreference(dev_priv->ring.ring_obj);
+ dev_priv->ring.ring_obj = NULL;
+ (void) memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+ i915_gem_cleanup_hws(dev);
+}
+
+/*ARGSUSED*/
+int
+i915_gem_entervt_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ if (dev_priv->mm.wedged) {
+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
+ dev_priv->mm.wedged = 0;
+ }
+	/* Set up the kernel mapping for the GTT aperture. */
+ dev_priv->mm.gtt_mapping.offset = dev->agp->agp_info.agpi_aperbase;
+ dev_priv->mm.gtt_mapping.size = dev->agp->agp_info.agpi_apersize;
+ dev_priv->mm.gtt_mapping.type = 0;
+ dev_priv->mm.gtt_mapping.flags = 0;
+ dev_priv->mm.gtt_mapping.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->mm.gtt_mapping, dev);
+
+ spin_lock(&dev->struct_mutex);
+ dev_priv->mm.suspended = 0;
+ ret = i915_gem_init_ringbuffer(dev);
+	if (ret != 0) {
+		spin_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+ spin_unlock(&dev->struct_mutex);
+
+ (void) drm_irq_install(dev);
+
+ return 0;
+}
+
+/*ARGSUSED*/
+int
+i915_gem_leavevt_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ ret = i915_gem_idle(dev, 0);
+ (void) drm_irq_uninstall(dev);
+
+ drm_core_ioremapfree(&dev_priv->mm.gtt_mapping, dev);
+ return ret;
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_idle(dev, 1);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+
+ drm_mm_clean_ml(&dev_priv->mm.gtt_space);
+}
+
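+/*
+ * One-time GEM setup at driver load: initialize the object and request
+ * lists, seed the seqno counter, and detect the bit 6 swizzle mode.
+ */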
+void
+i915_gem_load(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ dev_priv->mm.next_gem_seqno = 1;
+
+ i915_gem_detect_bit_6_swizzle(dev);
+}
+
diff --git a/usr/src/uts/intel/io/drm/i915_gem_debug.c b/usr/src/uts/intel/io/drm/i915_gem_debug.c
new file mode 100644
index 0000000..08580bc
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_gem_debug.c
@@ -0,0 +1,1108 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
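+/*
+ * Abort decoding an instruction that runs past the end of the batch:
+ * report the truncation, count a failure, and consume the remaining
+ * dwords so the caller stops cleanly.
+ */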
+#define BUFFER_FAIL(_count, _len, _name) { \
+ DRM_ERROR("Buffer size too small in %s (%d < %d)\n", \
+ (_name), (_count), (_len)); \
+ (*failures)++; \
+ return count; \
+}
+
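+/*
+ * Vertex format state (S2/S4) captured from the most recent
+ * 3DSTATE_LOAD_STATE_IMMEDIATE_1, needed later to decode inline
+ * vertex data in 3DPRIMITIVE.
+ */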
+static uint32_t saved_s2 = 0, saved_s4 = 0;
+static char saved_s2_set = 0, saved_s4_set = 0;
+
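+/*
+ * Print one batch dword: its hardware graphics address, raw value,
+ * and a caller-supplied description formatted through vcmn_err().
+ */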
+static void
+instr_out(uint32_t *data, uint32_t hw_offset, unsigned int index,
+ const char *fmt, ...)
+{
+	va_list ap;
+
+	DRM_ERROR("0x%08x: 0x%08x:%s ", hw_offset + index * 4, data[index],
+	    index == 0 ? "" : " ");
+
+	va_start(ap, fmt);
+	vcmn_err(CE_WARN, fmt, ap);
+	va_end(ap);
+}
+
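+/*
+ * Decode one MI (memory interface) instruction: match bits 28:23
+ * against the opcode table, validate the length field (total dword
+ * count minus two) for multi-dword forms, and return the number of
+ * dwords consumed.
+ */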
+static int
+decode_mi(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_mi[] = {
+ { 0x08, 1, 1, "MI_ARB_ON_OFF" },
+ { 0x0a, 1, 1, "MI_BATCH_BUFFER_END" },
+ { 0x31, 2, 2, "MI_BATCH_BUFFER_START" },
+ { 0x14, 3, 3, "MI_DISPLAY_BUFFER_INFO" },
+ { 0x04, 1, 1, "MI_FLUSH" },
+ { 0x22, 3, 3, "MI_LOAD_REGISTER_IMM" },
+ { 0x13, 2, 2, "MI_LOAD_SCAN_LINES_EXCL" },
+ { 0x12, 2, 2, "MI_LOAD_SCAN_LINES_INCL" },
+ { 0x00, 1, 1, "MI_NOOP" },
+ { 0x11, 2, 2, "MI_OVERLAY_FLIP" },
+ { 0x07, 1, 1, "MI_REPORT_HEAD" },
+ { 0x18, 2, 2, "MI_SET_CONTEXT" },
+ { 0x20, 3, 4, "MI_STORE_DATA_IMM" },
+ { 0x21, 3, 4, "MI_STORE_DATA_INDEX" },
+ { 0x24, 3, 3, "MI_STORE_REGISTER_MEM" },
+ { 0x02, 1, 1, "MI_USER_INTERRUPT" },
+ { 0x03, 1, 1, "MI_WAIT_FOR_EVENT" },
+ };
+
+ for (opcode = 0; opcode < sizeof(opcodes_mi) / sizeof(opcodes_mi[0]);
+ opcode++) {
+ if ((data[0] & 0x1f800000) >> 23 == opcodes_mi[opcode].opcode) {
+ unsigned int len = 1, i;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_mi[opcode].name);
+ if (opcodes_mi[opcode].max_len > 1) {
+ len = (data[0] & 0x000000ff) + 2;
+ if (len < opcodes_mi[opcode].min_len ||
+ len > opcodes_mi[opcode].max_len)
+ {
+ DRM_ERROR("Bad length in %s\n",
+ opcodes_mi[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_mi[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "MI UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static int
+decode_2d(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode, len;
+ char *format = NULL;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_2d[] = {
+ { 0x40, 5, 5, "COLOR_BLT" },
+ { 0x43, 6, 6, "SRC_COPY_BLT" },
+ { 0x01, 8, 8, "XY_SETUP_BLT" },
+ { 0x11, 9, 9, "XY_SETUP_MONO_PATTERN_SL_BLT" },
+ { 0x03, 3, 3, "XY_SETUP_CLIP_BLT" },
+ { 0x24, 2, 2, "XY_PIXEL_BLT" },
+ { 0x25, 3, 3, "XY_SCANLINES_BLT" },
+ { 0x26, 4, 4, "Y_TEXT_BLT" },
+ { 0x31, 5, 134, "XY_TEXT_IMMEDIATE_BLT" },
+ { 0x50, 6, 6, "XY_COLOR_BLT" },
+ { 0x51, 6, 6, "XY_PAT_BLT" },
+ { 0x76, 8, 8, "XY_PAT_CHROMA_BLT" },
+ { 0x72, 7, 135, "XY_PAT_BLT_IMMEDIATE" },
+ { 0x77, 9, 137, "XY_PAT_CHROMA_BLT_IMMEDIATE" },
+ { 0x52, 9, 9, "XY_MONO_PAT_BLT" },
+ { 0x59, 7, 7, "XY_MONO_PAT_FIXED_BLT" },
+ { 0x53, 8, 8, "XY_SRC_COPY_BLT" },
+ { 0x54, 8, 8, "XY_MONO_SRC_COPY_BLT" },
+ { 0x71, 9, 137, "XY_MONO_SRC_COPY_IMMEDIATE_BLT" },
+ { 0x55, 9, 9, "XY_FULL_BLT" },
+ { 0x55, 9, 137, "XY_FULL_IMMEDIATE_PATTERN_BLT" },
+ { 0x56, 9, 9, "XY_FULL_MONO_SRC_BLT" },
+ { 0x75, 10, 138, "XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT" },
+ { 0x57, 12, 12, "XY_FULL_MONO_PATTERN_BLT" },
+ { 0x58, 12, 12, "XY_FULL_MONO_PATTERN_MONO_SRC_BLT" },
+ };
+
+ switch ((data[0] & 0x1fc00000) >> 22) {
+ case 0x50:
+ instr_out(data, hw_offset, 0,
+ "XY_COLOR_BLT (rgb %sabled, alpha %sabled, dst tile %d)\n",
+ (data[0] & (1 << 20)) ? "en" : "dis",
+ (data[0] & (1 << 21)) ? "en" : "dis",
+ (data[0] >> 11) & 1);
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 6)
+ DRM_ERROR("Bad count in XY_COLOR_BLT\n");
+ if (count < 6)
+ BUFFER_FAIL(count, len, "XY_COLOR_BLT");
+
+ switch ((data[1] >> 24) & 0x3) {
+ case 0:
+ format="8";
+ break;
+ case 1:
+ format="565";
+ break;
+ case 2:
+ format="1555";
+ break;
+ case 3:
+ format="8888";
+ break;
+ }
+
+ instr_out(data, hw_offset, 1, "format %s, pitch %d, "
+ "clipping %sabled\n", format,
+ (short)(data[1] & 0xffff),
+ data[1] & (1 << 30) ? "en" : "dis");
+ instr_out(data, hw_offset, 2, "(%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(data, hw_offset, 3, "(%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(data, hw_offset, 4, "offset 0x%08x\n", data[4]);
+ instr_out(data, hw_offset, 5, "color\n");
+ return len;
+ case 0x53:
+ instr_out(data, hw_offset, 0,
+ "XY_SRC_COPY_BLT (rgb %sabled, alpha %sabled, "
+ "src tile %d, dst tile %d)\n",
+ (data[0] & (1 << 20)) ? "en" : "dis",
+ (data[0] & (1 << 21)) ? "en" : "dis",
+ (data[0] >> 15) & 1,
+ (data[0] >> 11) & 1);
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 8)
+ DRM_ERROR("Bad count in XY_SRC_COPY_BLT\n");
+ if (count < 8)
+ BUFFER_FAIL(count, len, "XY_SRC_COPY_BLT");
+
+ switch ((data[1] >> 24) & 0x3) {
+ case 0:
+ format="8";
+ break;
+ case 1:
+ format="565";
+ break;
+ case 2:
+ format="1555";
+ break;
+ case 3:
+ format="8888";
+ break;
+ }
+
+ instr_out(data, hw_offset, 1, "format %s, dst pitch %d, "
+ "clipping %sabled\n", format,
+ (short)(data[1] & 0xffff),
+ data[1] & (1 << 30) ? "en" : "dis");
+ instr_out(data, hw_offset, 2, "dst (%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(data, hw_offset, 3, "dst (%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(data, hw_offset, 4, "dst offset 0x%08x\n", data[4]);
+ instr_out(data, hw_offset, 5, "src (%d,%d)\n",
+ data[5] & 0xffff, data[5] >> 16);
+ instr_out(data, hw_offset, 6, "src pitch %d\n",
+ (short)(data[6] & 0xffff));
+ instr_out(data, hw_offset, 7, "src offset 0x%08x\n", data[7]);
+ return len;
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_2d) / sizeof(opcodes_2d[0]);
+ opcode++) {
+ if ((data[0] & 0x1fc00000) >> 22 == opcodes_2d[opcode].opcode) {
+ unsigned int i;
+
+ len = 1;
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_2d[opcode].name);
+ if (opcodes_2d[opcode].max_len > 1) {
+ len = (data[0] & 0x000000ff) + 2;
+ if (len < opcodes_2d[opcode].min_len ||
+ len > opcodes_2d[opcode].max_len)
+ {
+ DRM_ERROR("Bad count in %s\n", opcodes_2d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_2d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "2D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+/*ARGSUSED*/
+static int
+decode_3d_1c(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ switch ((data[0] & 0x00f80000) >> 19) {
+ case 0x11:
+		instr_out(data, hw_offset, 0, "3DSTATE_DEPTH_SUBRECTANGLE_DISABLE\n");
+ return 1;
+ case 0x10:
+ instr_out(data, hw_offset, 0, "3DSTATE_SCISSOR_ENABLE\n");
+ return 1;
+ case 0x01:
+ instr_out(data, hw_offset, 0, "3DSTATE_MAP_COORD_SET_I830\n");
+ return 1;
+ case 0x0a:
+ instr_out(data, hw_offset, 0, "3DSTATE_MAP_CUBE_I830\n");
+ return 1;
+ case 0x05:
+ instr_out(data, hw_offset, 0, "3DSTATE_MAP_TEX_STREAM_I830\n");
+ return 1;
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static int
+decode_3d_1d(uint32_t *data, int count, uint32_t hw_offset, int *failures, int i830)
+{
+ unsigned int len, i, c, opcode, word, map, sampler, instr;
+
+ struct {
+ uint32_t opcode;
+ int i830_only;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_3d_1d[] = {
+ { 0x8e, 0, 3, 3, "3DSTATE_BUFFER_INFO" },
+ { 0x86, 0, 4, 4, "3DSTATE_CHROMA_KEY" },
+ { 0x9c, 0, 1, 1, "3DSTATE_CLEAR_PARAMETERS" },
+ { 0x88, 0, 2, 2, "3DSTATE_CONSTANT_BLEND_COLOR" },
+ { 0x99, 0, 2, 2, "3DSTATE_DEFAULT_DIFFUSE" },
+ { 0x9a, 0, 2, 2, "3DSTATE_DEFAULT_SPECULAR" },
+ { 0x98, 0, 2, 2, "3DSTATE_DEFAULT_Z" },
+ { 0x97, 0, 2, 2, "3DSTATE_DEPTH_OFFSET_SCALE" },
+ { 0x85, 0, 2, 2, "3DSTATE_DEST_BUFFER_VARIABLES" },
+ { 0x80, 0, 5, 5, "3DSTATE_DRAWING_RECTANGLE" },
+ { 0x9d, 0, 65, 65, "3DSTATE_FILTER_COEFFICIENTS_4X4" },
+ { 0x9e, 0, 4, 4, "3DSTATE_MONO_FILTER" },
+ { 0x89, 0, 4, 4, "3DSTATE_FOG_MODE" },
+ { 0x8f, 0, 2, 16, "3DSTATE_MAP_PALLETE_LOAD_32" },
+ { 0x81, 0, 3, 3, "3DSTATE_SCISSOR_RECTANGLE" },
+ { 0x83, 0, 2, 2, "3DSTATE_SPAN_STIPPLE" },
+ { 0x8c, 1, 2, 2, "3DSTATE_MAP_COORD_TRANSFORM_I830" },
+ { 0x8b, 1, 2, 2, "3DSTATE_MAP_VERTEX_TRANSFORM_I830" },
+ { 0x8d, 1, 3, 3, "3DSTATE_W_STATE_I830" },
+ { 0x01, 1, 2, 2, "3DSTATE_COLOR_FACTOR_I830" },
+ { 0x02, 1, 2, 2, "3DSTATE_MAP_COORD_SETBIND_I830" },
+ };
+
+ switch ((data[0] & 0x00ff0000) >> 16) {
+ case 0x07:
+ /* This instruction is unusual. A 0 length means just 1 DWORD instead of
+ * 2. The 0 length is specified in one place to be unsupported, but
+ * stated to be required in another, and 0 length LOAD_INDIRECTs appear
+ * to cause no harm at least.
+ */
+ instr_out(data, hw_offset, 0, "3DSTATE_LOAD_INDIRECT\n");
+ len = (data[0] & 0x000000ff) + 1;
+ i = 1;
+ if (data[0] & (0x01 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "SIS.0\n");
+ instr_out(data, hw_offset, i++, "SIS.1\n");
+ }
+ if (data[0] & (0x02 << 8)) {
+ if (i + 1 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "DIS.0\n");
+ }
+ if (data[0] & (0x04 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "SSB.0\n");
+ instr_out(data, hw_offset, i++, "SSB.1\n");
+ }
+ if (data[0] & (0x08 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "MSB.0\n");
+ instr_out(data, hw_offset, i++, "MSB.1\n");
+ }
+ if (data[0] & (0x10 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "PSP.0\n");
+ instr_out(data, hw_offset, i++, "PSP.1\n");
+ }
+ if (data[0] & (0x20 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "PSC.0\n");
+ instr_out(data, hw_offset, i++, "PSC.1\n");
+ }
+ if (len != i) {
+ DRM_ERROR("Bad count in 3DSTATE_LOAD_INDIRECT\n");
+ (*failures)++;
+ return len;
+ }
+ return len;
+ case 0x04:
+ instr_out(data, hw_offset, 0, "3DSTATE_LOAD_STATE_IMMEDIATE_1\n");
+ len = (data[0] & 0x0000000f) + 2;
+ i = 1;
+ for (word = 0; word <= 7; word++) {
+ if (data[0] & (1 << (4 + word))) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_STATE_IMMEDIATE_1");
+
+ /* save vertex state for decode */
+ if (word == 2) {
+ saved_s2_set = 1;
+ saved_s2 = data[i];
+ }
+ if (word == 4) {
+ saved_s4_set = 1;
+ saved_s4 = data[i];
+ }
+
+ instr_out(data, hw_offset, i++, "S%d\n", word);
+ }
+ }
+ if (len != i) {
+			DRM_ERROR("Bad count in 3DSTATE_LOAD_STATE_IMMEDIATE_1\n");
+ (*failures)++;
+ }
+ return len;
+ case 0x00:
+ instr_out(data, hw_offset, 0, "3DSTATE_MAP_STATE\n");
+ len = (data[0] & 0x0000003f) + 2;
+
+ i = 1;
+ for (map = 0; map <= 15; map++) {
+ if (data[1] & (1 << map)) {
+ if (i + 3 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_MAP_STATE");
+ instr_out(data, hw_offset, i++, "map %d MS2\n", map);
+ instr_out(data, hw_offset, i++, "map %d MS3\n", map);
+ instr_out(data, hw_offset, i++, "map %d MS4\n", map);
+ }
+ }
+ if (len != i) {
+ DRM_ERROR("Bad count in 3DSTATE_MAP_STATE\n");
+ (*failures)++;
+ return len;
+ }
+ return len;
+ case 0x06:
+ instr_out(data, hw_offset, 0, "3DSTATE_PIXEL_SHADER_CONSTANTS\n");
+ len = (data[0] & 0x000000ff) + 2;
+
+ i = 1;
+ for (c = 0; c <= 31; c++) {
+ if (data[1] & (1 << c)) {
+ if (i + 4 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_PIXEL_SHADER_CONSTANTS");
+ instr_out(data, hw_offset, i, "C%d.X = %x float\n",
+ c, data[i]);
+ i++;
+ instr_out(data, hw_offset, i, "C%d.Y = %x float\n",
+ c, data[i]);
+ i++;
+ instr_out(data, hw_offset, i, "C%d.Z = %x float\n",
+ c, data[i]);
+ i++;
+ instr_out(data, hw_offset, i, "C%d.W = %x float\n",
+ c, data[i]);
+ i++;
+ }
+ }
+ if (len != i) {
+		DRM_ERROR("Bad count in 3DSTATE_PIXEL_SHADER_CONSTANTS\n");
+ (*failures)++;
+ }
+ return len;
+ case 0x05:
+ instr_out(data, hw_offset, 0, "3DSTATE_PIXEL_SHADER_PROGRAM\n");
+ len = (data[0] & 0x000000ff) + 2;
+ if ((len - 1) % 3 != 0 || len > 370) {
+ DRM_ERROR("Bad count in 3DSTATE_PIXEL_SHADER_PROGRAM\n");
+ (*failures)++;
+ }
+ i = 1;
+ for (instr = 0; instr < (len - 1) / 3; instr++) {
+ if (i + 3 >= count)
+				BUFFER_FAIL(count, len, "3DSTATE_PIXEL_SHADER_PROGRAM");
+ instr_out(data, hw_offset, i++, "PS%03x\n", instr);
+ instr_out(data, hw_offset, i++, "PS%03x\n", instr);
+ instr_out(data, hw_offset, i++, "PS%03x\n", instr);
+ }
+ return len;
+ case 0x01:
+ if (i830)
+ break;
+ instr_out(data, hw_offset, 0, "3DSTATE_SAMPLER_STATE\n");
+ len = (data[0] & 0x0000003f) + 2;
+ i = 1;
+ for (sampler = 0; sampler <= 15; sampler++) {
+ if (data[1] & (1 << sampler)) {
+ if (i + 3 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_SAMPLER_STATE");
+ instr_out(data, hw_offset, i++, "sampler %d SS2\n",
+ sampler);
+ instr_out(data, hw_offset, i++, "sampler %d SS3\n",
+ sampler);
+ instr_out(data, hw_offset, i++, "sampler %d SS4\n",
+ sampler);
+ }
+ }
+ if (len != i) {
+ DRM_ERROR("Bad count in 3DSTATE_SAMPLER_STATE\n");
+ (*failures)++;
+ }
+ return len;
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_3d_1d) / sizeof(opcodes_3d_1d[0]);
+ opcode++)
+ {
+ if (opcodes_3d_1d[opcode].i830_only && !i830)
+ continue;
+
+ if (((data[0] & 0x00ff0000) >> 16) == opcodes_3d_1d[opcode].opcode) {
+ len = 1;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_3d_1d[opcode].name);
+ if (opcodes_3d_1d[opcode].max_len > 1) {
+ len = (data[0] & 0x0000ffff) + 2;
+ if (len < opcodes_3d_1d[opcode].min_len ||
+ len > opcodes_3d_1d[opcode].max_len)
+ {
+ DRM_ERROR("Bad count in %s\n",
+ opcodes_3d_1d[opcode].name);
+ (*failures)++;
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_3d_1d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
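+/*
+ * Decode 3DPRIMITIVE. Inline primitives carry vertex data directly in
+ * the batch and are decoded using the saved S2/S4 vertex format;
+ * indirect primitives reference vertex buffers and are decoded as
+ * index lists (random access) or a start index plus count (sequential).
+ */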
+static int
+decode_3d_primitive(uint32_t *data, int count, uint32_t hw_offset,
+ int *failures)
+{
+ char immediate = (data[0] & (1 << 23)) == 0;
+ unsigned int len, i;
+ char *primtype;
+
+ switch ((data[0] >> 18) & 0xf) {
+ case 0x0: primtype = "TRILIST"; break;
+ case 0x1: primtype = "TRISTRIP"; break;
+ case 0x2: primtype = "TRISTRIP_REVERSE"; break;
+ case 0x3: primtype = "TRIFAN"; break;
+ case 0x4: primtype = "POLYGON"; break;
+ case 0x5: primtype = "LINELIST"; break;
+ case 0x6: primtype = "LINESTRIP"; break;
+ case 0x7: primtype = "RECTLIST"; break;
+ case 0x8: primtype = "POINTLIST"; break;
+ case 0x9: primtype = "DIB"; break;
+ case 0xa: primtype = "CLEAR_RECT"; break;
+ default: primtype = "unknown"; break;
+ }
+
+ /* XXX: 3DPRIM_DIB not supported */
+ if (immediate) {
+ len = (data[0] & 0x0003ffff) + 2;
+ instr_out(data, hw_offset, 0, "3DPRIMITIVE inline %s\n", primtype);
+ if (count < len)
+ BUFFER_FAIL(count, len, "3DPRIMITIVE inline");
+ if (!saved_s2_set || !saved_s4_set) {
+ DRM_ERROR("unknown vertex format\n");
+ for (i = 1; i < len; i++) {
+ instr_out(data, hw_offset, i,
+ " vertex data (%x float)\n",
+ data[i]);
+ }
+ } else {
+ unsigned int vertex = 0;
+ for (i = 1; i < len;) {
+ unsigned int tc;
+
+#define VERTEX_OUT(fmt, ...) { \
+ if (i < len) \
+ instr_out(data, hw_offset, i, " V%d."fmt"\n", vertex, __VA_ARGS__); \
+ else \
+ DRM_ERROR(" missing data in V%d\n", vertex); \
+ i++; \
+}
+
+ VERTEX_OUT("X = %x float", data[i]);
+ VERTEX_OUT("Y = %x float", data[i]);
+ switch (saved_s4 >> 6 & 0x7) {
+ case 0x1:
+ VERTEX_OUT("Z = %x float", data[i]);
+ break;
+ case 0x2:
+ VERTEX_OUT("Z = %x float", data[i]);
+ VERTEX_OUT("W = %x float", data[i]);
+ break;
+ case 0x3:
+ break;
+ case 0x4:
+ VERTEX_OUT("W = %x float", data[i]);
+ break;
+ default:
+ DRM_ERROR("bad S4 position mask\n");
+ }
+
+ if (saved_s4 & (1 << 10)) {
+ VERTEX_OUT("color = (A=0x%02x, R=0x%02x, G=0x%02x, "
+ "B=0x%02x)",
+ data[i] >> 24,
+ (data[i] >> 16) & 0xff,
+ (data[i] >> 8) & 0xff,
+ data[i] & 0xff);
+ }
+ if (saved_s4 & (1 << 11)) {
+ VERTEX_OUT("spec = (A=0x%02x, R=0x%02x, G=0x%02x, "
+ "B=0x%02x)",
+ data[i] >> 24,
+ (data[i] >> 16) & 0xff,
+ (data[i] >> 8) & 0xff,
+ data[i] & 0xff);
+ }
+ if (saved_s4 & (1 << 12))
+				VERTEX_OUT("width = 0x%08x", data[i]);
+
+ for (tc = 0; tc <= 7; tc++) {
+ switch ((saved_s2 >> (tc * 4)) & 0xf) {
+ case 0x0:
+ VERTEX_OUT("T%d.X = %x float", tc, data[i]);
+ VERTEX_OUT("T%d.Y = %x float", tc, data[i]);
+ break;
+ case 0x1:
+ VERTEX_OUT("T%d.X = %x float", tc, data[i]);
+ VERTEX_OUT("T%d.Y = %x float", tc, data[i]);
+ VERTEX_OUT("T%d.Z = %x float", tc, data[i]);
+ break;
+ case 0x2:
+ VERTEX_OUT("T%d.X = %x float", tc, data[i]);
+ VERTEX_OUT("T%d.Y = %x float", tc, data[i]);
+ VERTEX_OUT("T%d.Z = %x float", tc, data[i]);
+ VERTEX_OUT("T%d.W = %x float", tc, data[i]);
+ break;
+ case 0x3:
+ VERTEX_OUT("T%d.X = %x float", tc, data[i]);
+ break;
+ case 0x4:
+ VERTEX_OUT("T%d.XY = 0x%08x half-float", tc, data[i]);
+ break;
+ case 0x5:
+ VERTEX_OUT("T%d.XY = 0x%08x half-float", tc, data[i]);
+ VERTEX_OUT("T%d.ZW = 0x%08x half-float", tc, data[i]);
+ break;
+ case 0xf:
+ break;
+ default:
+ DRM_ERROR("bad S2.T%d format\n", tc);
+ }
+ }
+ vertex++;
+ }
+ }
+ } else {
+ /* indirect vertices */
+ len = data[0] & 0x0000ffff; /* index count */
+ if (data[0] & (1 << 17)) {
+ /* random vertex access */
+ if (count < (len + 1) / 2 + 1)
+ BUFFER_FAIL(count, (len + 1) / 2 + 1, "3DPRIMITIVE random indirect");
+ instr_out(data, hw_offset, 0,
+ "3DPRIMITIVE random indirect %s (%d)\n", primtype, len);
+ if (len == 0) {
+ /* vertex indices continue until 0xffff is found */
+ for (i = 1; i < count; i++) {
+ if ((data[i] & 0xffff) == 0xffff) {
+ instr_out(data, hw_offset, i,
+ " indices: (terminator)\n");
+ return i;
+ } else if ((data[i] >> 16) == 0xffff) {
+ instr_out(data, hw_offset, i,
+ " indices: 0x%04x, "
+ "(terminator)\n",
+ data[i] & 0xffff);
+ return i;
+ } else {
+ instr_out(data, hw_offset, i,
+ " indices: 0x%04x, 0x%04x\n",
+ data[i] & 0xffff, data[i] >> 16);
+ }
+ }
+ DRM_ERROR("3DPRIMITIVE: no terminator found in index buffer\n");
+ (*failures)++;
+ return count;
+ } else {
+ /* fixed size vertex index buffer */
+ for (i = 0; i < len; i += 2) {
+ if (i * 2 == len - 1) {
+ instr_out(data, hw_offset, i,
+ " indices: 0x%04x\n",
+ data[i] & 0xffff);
+ } else {
+ instr_out(data, hw_offset, i,
+ " indices: 0x%04x, 0x%04x\n",
+ data[i] & 0xffff, data[i] >> 16);
+ }
+ }
+ }
+ return (len + 1) / 2 + 1;
+ } else {
+ /* sequential vertex access */
+ if (count < 2)
+ BUFFER_FAIL(count, 2, "3DPRIMITIVE seq indirect");
+ instr_out(data, hw_offset, 0,
+ "3DPRIMITIVE sequential indirect %s, %d starting from "
+ "%d\n", primtype, len, data[1] & 0xffff);
+ instr_out(data, hw_offset, 1, " start\n");
+ return 2;
+ }
+ }
+
+ return len;
+}
+
+static int
+decode_3d(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_3d[] = {
+ { 0x06, 1, 1, "3DSTATE_ANTI_ALIASING" },
+ { 0x08, 1, 1, "3DSTATE_BACKFACE_STENCIL_OPS" },
+ { 0x09, 1, 1, "3DSTATE_BACKFACE_STENCIL_MASKS" },
+ { 0x16, 1, 1, "3DSTATE_COORD_SET_BINDINGS" },
+ { 0x15, 1, 1, "3DSTATE_FOG_COLOR" },
+ { 0x0b, 1, 1, "3DSTATE_INDEPENDENT_ALPHA_BLEND" },
+ { 0x0d, 1, 1, "3DSTATE_MODES_4" },
+ { 0x0c, 1, 1, "3DSTATE_MODES_5" },
+ { 0x07, 1, 1, "3DSTATE_RASTERIZATION_RULES" },
+ };
+
+ switch ((data[0] & 0x1f000000) >> 24) {
+ case 0x1f:
+ return decode_3d_primitive(data, count, hw_offset, failures);
+ case 0x1d:
+ return decode_3d_1d(data, count, hw_offset, failures, 0);
+ case 0x1c:
+ return decode_3d_1c(data, count, hw_offset, failures);
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
+ opcode++) {
+ if ((data[0] & 0x1f000000) >> 24 == opcodes_3d[opcode].opcode) {
+ unsigned int len = 1, i;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
+ if (opcodes_3d[opcode].max_len > 1) {
+ len = (data[0] & 0xff) + 2;
+ if (len < opcodes_3d[opcode].min_len ||
+ len > opcodes_3d[opcode].max_len)
+ {
+ DRM_ERROR("Bad count in %s\n", opcodes_3d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static const char *
+get_965_surfacetype(unsigned int surfacetype)
+{
+ switch (surfacetype) {
+ case 0: return "1D";
+ case 1: return "2D";
+ case 2: return "3D";
+ case 3: return "CUBE";
+ case 4: return "BUFFER";
+ case 7: return "NULL";
+ default: return "unknown";
+ }
+}
+
+static const char *
+get_965_depthformat(unsigned int depthformat)
+{
+ switch (depthformat) {
+ case 0: return "s8_z24float";
+ case 1: return "z32float";
+ case 2: return "z24s8";
+ case 5: return "z16";
+ default: return "unknown";
+ }
+}
+
+static int
+decode_3d_965(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode, len;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_3d[] = {
+ { 0x6000, 3, 3, "URB_FENCE" },
+ { 0x6001, 2, 2, "CS_URB_STATE" },
+ { 0x6002, 2, 2, "CONSTANT_BUFFER" },
+ { 0x6101, 6, 6, "STATE_BASE_ADDRESS" },
+	{ 0x6102, 2, 2, "STATE_SIP" },
+ { 0x6104, 1, 1, "3DSTATE_PIPELINE_SELECT" },
+ { 0x680b, 1, 1, "3DSTATE_VF_STATISTICS" },
+ { 0x6904, 1, 1, "3DSTATE_PIPELINE_SELECT" },
+ { 0x7800, 7, 7, "3DSTATE_PIPELINED_POINTERS" },
+ { 0x7801, 6, 6, "3DSTATE_BINDING_TABLE_POINTERS" },
+ { 0x780b, 1, 1, "3DSTATE_VF_STATISTICS" },
+ { 0x7808, 5, 257, "3DSTATE_VERTEX_BUFFERS" },
+ { 0x7809, 3, 256, "3DSTATE_VERTEX_ELEMENTS" },
+ { 0x7900, 4, 4, "3DSTATE_DRAWING_RECTANGLE" },
+ { 0x7901, 5, 5, "3DSTATE_CONSTANT_COLOR" },
+ { 0x7905, 5, 7, "3DSTATE_DEPTH_BUFFER" },
+ { 0x7906, 2, 2, "3DSTATE_POLY_STIPPLE_OFFSET" },
+ { 0x7907, 33, 33, "3DSTATE_POLY_STIPPLE_PATTERN" },
+ { 0x7908, 3, 3, "3DSTATE_LINE_STIPPLE" },
+ { 0x7909, 2, 2, "3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP" },
+ { 0x790a, 3, 3, "3DSTATE_AA_LINE_PARAMETERS" },
+ { 0x7b00, 6, 6, "3DPRIMITIVE" },
+ };
+
+ len = (data[0] & 0x0000ffff) + 2;
+
+ switch ((data[0] & 0xffff0000) >> 16) {
+ case 0x6101:
+ if (len != 6)
+ DRM_ERROR("Bad count in STATE_BASE_ADDRESS\n");
+ if (count < 6)
+ BUFFER_FAIL(count, len, "STATE_BASE_ADDRESS");
+
+ instr_out(data, hw_offset, 0,
+ "STATE_BASE_ADDRESS\n");
+
+ if (data[1] & 1) {
+ instr_out(data, hw_offset, 1, "General state at 0x%08x\n",
+ data[1] & ~1);
+ } else
+ instr_out(data, hw_offset, 1, "General state not updated\n");
+
+ if (data[2] & 1) {
+ instr_out(data, hw_offset, 2, "Surface state at 0x%08x\n",
+ data[2] & ~1);
+ } else
+ instr_out(data, hw_offset, 2, "Surface state not updated\n");
+
+ if (data[3] & 1) {
+ instr_out(data, hw_offset, 3, "Indirect state at 0x%08x\n",
+ data[3] & ~1);
+ } else
+ instr_out(data, hw_offset, 3, "Indirect state not updated\n");
+
+ if (data[4] & 1) {
+ instr_out(data, hw_offset, 4, "General state upper bound 0x%08x\n",
+ data[4] & ~1);
+ } else
+ instr_out(data, hw_offset, 4, "General state not updated\n");
+
+ if (data[5] & 1) {
+ instr_out(data, hw_offset, 5, "Indirect state upper bound 0x%08x\n",
+ data[5] & ~1);
+ } else
+ instr_out(data, hw_offset, 5, "Indirect state not updated\n");
+
+ return len;
+ case 0x7800:
+ if (len != 7)
+ DRM_ERROR("Bad count in 3DSTATE_PIPELINED_POINTERS\n");
+ if (count < 7)
+ BUFFER_FAIL(count, len, "3DSTATE_PIPELINED_POINTERS");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_PIPELINED_POINTERS\n");
+ instr_out(data, hw_offset, 1, "VS state\n");
+ instr_out(data, hw_offset, 2, "GS state\n");
+ instr_out(data, hw_offset, 3, "Clip state\n");
+ instr_out(data, hw_offset, 4, "SF state\n");
+ instr_out(data, hw_offset, 5, "WM state\n");
+ instr_out(data, hw_offset, 6, "CC state\n");
+ return len;
+ case 0x7801:
+ if (len != 6)
+ DRM_ERROR("Bad count in 3DSTATE_BINDING_TABLE_POINTERS\n");
+ if (count < 6)
+ BUFFER_FAIL(count, len, "3DSTATE_BINDING_TABLE_POINTERS");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_BINDING_TABLE_POINTERS\n");
+ instr_out(data, hw_offset, 1, "VS binding table\n");
+ instr_out(data, hw_offset, 2, "GS binding table\n");
+ instr_out(data, hw_offset, 3, "Clip binding table\n");
+ instr_out(data, hw_offset, 4, "SF binding table\n");
+ instr_out(data, hw_offset, 5, "WM binding table\n");
+
+ return len;
+
+ case 0x7900:
+ if (len != 4)
+ DRM_ERROR("Bad count in 3DSTATE_DRAWING_RECTANGLE\n");
+ if (count < 4)
+ BUFFER_FAIL(count, len, "3DSTATE_DRAWING_RECTANGLE");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_DRAWING_RECTANGLE\n");
+ instr_out(data, hw_offset, 1, "top left: %d,%d\n",
+ data[1] & 0xffff,
+ (data[1] >> 16) & 0xffff);
+ instr_out(data, hw_offset, 2, "bottom right: %d,%d\n",
+ data[2] & 0xffff,
+ (data[2] >> 16) & 0xffff);
+ instr_out(data, hw_offset, 3, "origin: %d,%d\n",
+ (int)data[3] & 0xffff,
+ ((int)data[3] >> 16) & 0xffff);
+
+ return len;
+
+ case 0x7905:
+ if (len != 5)
+ DRM_ERROR("Bad count in 3DSTATE_DEPTH_BUFFER\n");
+ if (count < 5)
+ BUFFER_FAIL(count, len, "3DSTATE_DEPTH_BUFFER");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_DEPTH_BUFFER\n");
+ instr_out(data, hw_offset, 1, "%s, %s, pitch = %d bytes, %stiled\n",
+ get_965_surfacetype(data[1] >> 29),
+ get_965_depthformat((data[1] >> 18) & 0x7),
+ (data[1] & 0x0001ffff) + 1,
+ data[1] & (1 << 27) ? "" : "not ");
+ instr_out(data, hw_offset, 2, "depth offset\n");
+ instr_out(data, hw_offset, 3, "%dx%d\n",
+ ((data[3] & 0x0007ffc0) >> 6) + 1,
+ ((data[3] & 0xfff80000) >> 19) + 1);
+ instr_out(data, hw_offset, 4, "volume depth\n");
+
+ return len;
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
+ opcode++) {
+ if ((data[0] & 0xffff0000) >> 16 == opcodes_3d[opcode].opcode) {
+ unsigned int i;
+ len = 1;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
+ if (opcodes_3d[opcode].max_len > 1) {
+ len = (data[0] & 0xff) + 2;
+ if (len < opcodes_3d[opcode].min_len ||
+ len > opcodes_3d[opcode].max_len)
+ {
+ DRM_ERROR("Bad count in %s\n", opcodes_3d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static int
+decode_3d_i830(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_3d[] = {
+ { 0x02, 1, 1, "3DSTATE_MODES_3" },
+ { 0x03, 1, 1, "3DSTATE_ENABLES_1"},
+ { 0x04, 1, 1, "3DSTATE_ENABLES_2"},
+ { 0x05, 1, 1, "3DSTATE_VFT0"},
+ { 0x06, 1, 1, "3DSTATE_AA"},
+ { 0x07, 1, 1, "3DSTATE_RASTERIZATION_RULES" },
+ { 0x08, 1, 1, "3DSTATE_MODES_1" },
+ { 0x09, 1, 1, "3DSTATE_STENCIL_TEST" },
+ { 0x0a, 1, 1, "3DSTATE_VFT1"},
+ { 0x0b, 1, 1, "3DSTATE_INDPT_ALPHA_BLEND" },
+ { 0x0c, 1, 1, "3DSTATE_MODES_5" },
+ { 0x0d, 1, 1, "3DSTATE_MAP_BLEND_OP" },
+ { 0x0e, 1, 1, "3DSTATE_MAP_BLEND_ARG" },
+ { 0x0f, 1, 1, "3DSTATE_MODES_2" },
+ { 0x15, 1, 1, "3DSTATE_FOG_COLOR" },
+ { 0x16, 1, 1, "3DSTATE_MODES_4" },
+ };
+
+ switch ((data[0] & 0x1f000000) >> 24) {
+ case 0x1f:
+ return decode_3d_primitive(data, count, hw_offset, failures);
+ case 0x1d:
+ return decode_3d_1d(data, count, hw_offset, failures, 1);
+ case 0x1c:
+ return decode_3d_1c(data, count, hw_offset, failures);
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
+ opcode++) {
+ if ((data[0] & 0x1f000000) >> 24 == opcodes_3d[opcode].opcode) {
+ unsigned int len = 1, i;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
+ if (opcodes_3d[opcode].max_len > 1) {
+ len = (data[0] & 0xff) + 2;
+ if (len < opcodes_3d[opcode].min_len ||
+ len > opcodes_3d[opcode].max_len)
+ {
+ DRM_ERROR("Bad count in %s\n", opcodes_3d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
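+/*
+ * Top-level batch buffer decoder: dispatch on command type bits 31:29
+ * (0 = MI, 2 = 2D blitter, 3 = 3D/render) and pick the 3D decoder
+ * matching the chipset generation.
+ */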
+void i915_gem_command_decode(uint32_t *data, int count, uint32_t hw_offset, struct drm_device *dev)
+{
+ int index = 0;
+ int failures = 0;
+
+ while (index < count) {
+ switch ((data[index] & 0xe0000000) >> 29) {
+ case 0x0:
+ index += decode_mi(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ break;
+ case 0x2:
+ index += decode_2d(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ break;
+ case 0x3:
+ if (IS_I965G(dev)) {
+ index += decode_3d_965(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ } else if (IS_I9XX(dev)) {
+ index += decode_3d(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ } else {
+ index += decode_3d_i830(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ }
+ break;
+ default:
+ instr_out(data, hw_offset, index, "UNKNOWN\n");
+ failures++;
+ index++;
+ break;
+ }
+ }
+}
+
diff --git a/usr/src/uts/intel/io/drm/i915_gem_tiling.c b/usr/src/uts/intel/io/drm/i915_gem_tiling.c
new file mode 100644
index 0000000..8681fb6
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_gem_tiling.c
@@ -0,0 +1,390 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/sysmacros.h>
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvement from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
+
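+/*
+ * As an illustrative sketch (not used by the driver): undoing 9_10
+ * swizzling in software would amount to
+ *
+ *	addr ^= (((addr >> 9) ^ (addr >> 10)) & 1) << 6;
+ *
+ * i.e. bit 6 of the effective address is XORed with bits 9 and 10.
+ */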
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+ if (!IS_I9XX(dev)) {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (IS_MOBILE(dev)) {
+ uint32_t dcc;
+
+ /* On mobile 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+ * 9 for Y tiled. The CPU's interleave is independent, and
+ * can be based on either bit 11 (haven't seen this yet) or
+ * bit 17 (common).
+ */
+
+ dcc = I915_READ(DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /* This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Bit 11 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ break;
+ }
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ } else {
+ /* The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ */
+ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ }
+
+ /* FIXME: check with memory config on IGDNG */
+ if (IS_IGDNG(dev)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/**
+ * Returns the size of the fence for a tiled object of the given size.
+ */
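+/*
+ * For example, a 640KB object on pre-965 9xx hardware gets a 1MB
+ * fence, since fence regions there are power-of-two sized with a
+ * 1MB minimum.
+ */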
+static int
+i915_get_fence_size(struct drm_device *dev, int size)
+{
+ int i;
+ int start;
+
+ if (IS_I965G(dev)) {
+ /* The 965 can have fences at any page boundary. */
+
+ return (size + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
+ } else {
+ /* Align the size to a power of two greater than the smallest
+ * fence size.
+ */
+ if (IS_I9XX(dev))
+ start = 1024 * 1024;
+ else
+ start = 512 * 1024;
+
+ for (i = start; i < size; i <<= 1)
+ ;
+
+ return i;
+ }
+}
+
+/* Check pitch constraints for all chips & tiling formats */
+static int
+i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+{
+ int tile_width;
+
+ /* Linear is always fine */
+ if (tiling_mode == I915_TILING_NONE)
+ return 1;
+
+ if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ if (stride == 0)
+ return 0;
+
+ /* 965+ just needs multiples of tile width */
+ if (IS_I965G(dev)) {
+ if (stride & (tile_width - 1))
+ return 0;
+ return 1;
+ }
+
+ /* Pre-965 needs power of two tile widths */
+ if (stride < tile_width)
+ return 0;
+
+ if (!ISP2(stride))
+ return 0;
+
+ /* We don't handle the aperture area covered by the fence being bigger
+ * than the object size.
+ */
+ if (i915_get_fence_size(dev, size) != size)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+/*ARGSUSED*/
+int
+i915_gem_set_tiling(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_set_tiling args;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_set_tiling __user *) data, sizeof(args));
+
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL)
+ return EINVAL;
+ obj_priv = obj->driver_private;
+
+ if (!i915_tiling_ok(dev, args.stride, obj->size, args.tiling_mode)) {
+ drm_gem_object_unreference(obj);
+ DRM_DEBUG("i915 tiling is not OK");
+ return EINVAL;
+ }
+
+ spin_lock(&dev->struct_mutex);
+
+ if (args.tiling_mode == I915_TILING_NONE) {
+ args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ if (args.tiling_mode == I915_TILING_X)
+ args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ else
+ args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ /* If we can't handle the swizzling, make it untiled. */
+ if (args.swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ args.tiling_mode = I915_TILING_NONE;
+ args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ }
+ }
+
+ if (args.tiling_mode != obj_priv->tiling_mode) {
+ int ret;
+
+ /* Unbind the object, as switching tiling means we're
+ * switching the cache organization due to fencing, probably.
+ */
+ ret = i915_gem_object_unbind(obj, 1);
+ if (ret != 0) {
+ args.tiling_mode = obj_priv->tiling_mode;
+ spin_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ DRM_ERROR("tiling switch!! unbind error %d", ret);
+ return ret;
+ }
+ obj_priv->tiling_mode = args.tiling_mode;
+ }
+ obj_priv->stride = args.stride;
+
+	ret = DRM_COPY_TO_USER((struct drm_i915_gem_set_tiling __user *) data,
+	    &args, sizeof(args));
+	if (ret != 0)
+		DRM_ERROR("gem set tiling ioctl: copyout error %d", ret);
+
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+/*ARGSUSED*/
+int
+i915_gem_get_tiling(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct drm_i915_gem_get_tiling args;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ if (dev->driver->use_gem != 1)
+ return ENODEV;
+
+ DRM_COPYFROM_WITH_RETURN(&args,
+ (struct drm_i915_gem_get_tiling __user *) data, sizeof(args));
+
+ obj = drm_gem_object_lookup(fpriv, args.handle);
+ if (obj == NULL)
+ return EINVAL;
+ obj_priv = obj->driver_private;
+
+ spin_lock(&dev->struct_mutex);
+
+ args.tiling_mode = obj_priv->tiling_mode;
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_X:
+ args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ break;
+ case I915_TILING_NONE:
+ args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ default:
+ DRM_ERROR("unknown tiling mode\n");
+ }
+
+ ret = DRM_COPY_TO_USER((struct drm_i915_gem_get_tiling __user *) data,
+     &args, sizeof (args));
+ if (ret != 0)
+ DRM_ERROR("gem get tiling error %d", ret);
+
+ drm_gem_object_unreference(obj);
+ spin_unlock(&dev->struct_mutex);
+
+ return 0;
+}
diff --git a/usr/src/uts/intel/io/drm/i915_irq.c b/usr/src/uts/intel/io/drm/i915_irq.c
new file mode 100644
index 0000000..d4023b5
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_irq.c
@@ -0,0 +1,1052 @@
+/* BEGIN CSTYLED */
+
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+
+#define MAX_NOPID ((u32)~0)
+
+/**
+ * Interrupts that are always left unmasked.
+ *
+ * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
+ * we leave them always unmasked in IMR and then control enabling them through
+ * PIPESTAT alone.
+ */
+
+#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+
+/** Interrupts that we mask and unmask at runtime. */
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+
+/** These are all of the interrupts used by the driver */
+#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
+ I915_INTERRUPT_ENABLE_VAR)
+
+void
+igdng_enable_irq(drm_i915_private_t *dev_priv, u32 mask, int gfx_irq)
+{
+ if (gfx_irq && ((dev_priv->gt_irq_mask_reg & mask) != 0)) {
+ dev_priv->gt_irq_mask_reg &= ~mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ (void) I915_READ(GTIMR);
+ } else if ((dev_priv->irq_mask_reg & mask) != 0) {
+ dev_priv->irq_mask_reg &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(DEIMR);
+ }
+}
+
+static inline void
+igdng_disable_irq(drm_i915_private_t *dev_priv, u32 mask, int gfx_irq)
+{
+ if (gfx_irq && ((dev_priv->gt_irq_mask_reg & mask) != mask)) {
+ dev_priv->gt_irq_mask_reg |= mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ (void) I915_READ(GTIMR);
+ } else if ((dev_priv->irq_mask_reg & mask) != mask) {
+ dev_priv->irq_mask_reg |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(DEIMR);
+ }
+}
+
+/* For display hotplug interrupt */
+void
+igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != 0) {
+ dev_priv->irq_mask_reg &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(DEIMR);
+ }
+}
+
+#if 0
+static inline void
+igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != mask) {
+ dev_priv->irq_mask_reg |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(DEIMR);
+ }
+}
+#endif
+
+static inline void
+i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != 0) {
+ dev_priv->irq_mask_reg &= ~mask;
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IMR);
+ }
+}
+
+static inline void
+i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != mask) {
+ dev_priv->irq_mask_reg |= mask;
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IMR);
+ }
+}
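+
+/*
+ * The dummy I915_READ() after each mask update above is a posting read:
+ * MMIO writes may be posted on the bus, and reading the register back
+ * forces them to reach the hardware before the caller proceeds.
+ */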
+
+static inline uint32_t
+i915_pipestat(int pipe)
+{
+ if (pipe == 0)
+ return PIPEASTAT;
+ if (pipe == 1)
+ return PIPEBSTAT;
+ return 0;
+}
+
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, uint32_t mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
+ u32 reg = i915_pipestat(pipe);
+
+ dev_priv->pipestat[pipe] |= mask;
+ /* Enable the interrupt, clear any pending status */
+ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
+ (void) I915_READ(reg);
+ }
+}
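+
+/*
+ * A note on the (mask >> 16) above: PIPESTAT keeps enable bits in the
+ * high half, each status bit sits sixteen bits below its enable, and
+ * status bits are cleared by writing 1 to them, so writing the enable
+ * mask shifted down also acks any event that was already pending.
+ */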
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
+ u32 reg = i915_pipestat(pipe);
+
+ dev_priv->pipestat[pipe] &= ~mask;
+ I915_WRITE(reg, dev_priv->pipestat[pipe]);
+ (void) I915_READ(reg);
+ }
+}
+
+/**
+ * i915_pipe_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+static int
+i915_pipe_enabled(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
+
+ if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long high_frame;
+ unsigned long low_frame;
+ u32 high1, high2, low, count;
+
+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
+ return 0;
+ }
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+ PIPE_FRAME_LOW_SHIFT);
+ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ } while (high1 != high2);
+
+ count = (high1 << 8) | low;
+
+ return count;
+}
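+
+/*
+ * As the (high1 << 8) | low composition implies, the frame counter is
+ * 24 bits wide and split across two registers: the high register
+ * supplies bits 23:8 and the frame/pixel register supplies bits 7:0.
+ */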
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+static void i915_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+#if 0
+ if (dev_priv->first_error)
+ goto out;
+#endif
+ error = drm_alloc(sizeof(*error), DRM_MEM_DRIVER);
+ if (!error) {
+ DRM_DEBUG("out ot memory, not capturing error state\n");
+ goto out;
+ }
+
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->pipeastat = I915_READ(PIPEASTAT);
+ error->pipebstat = I915_READ(PIPEBSTAT);
+ error->instpm = I915_READ(INSTPM);
+ if (!IS_I965G(dev)) {
+ error->ipeir = I915_READ(IPEIR);
+ error->ipehr = I915_READ(IPEHR);
+ error->instdone = I915_READ(INSTDONE);
+ error->acthd = I915_READ(ACTHD);
+ } else {
+ error->ipeir = I915_READ(IPEIR_I965);
+ error->ipehr = I915_READ(IPEHR_I965);
+ error->instdone = I915_READ(INSTDONE_I965);
+ error->instps = I915_READ(INSTPS);
+ error->instdone1 = I915_READ(INSTDONE1);
+ error->acthd = I915_READ(ACTHD_I965);
+ }
+
+ (void) uniqtime(&error->time);
+
+ dev_priv->first_error = error;
+
+ DRM_DEBUG("Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
+ DRM_DEBUG("EIR: 0x%08x\n", error->eir);
+ DRM_DEBUG(" PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ DRM_DEBUG(" INSTPM: 0x%08x\n", error->instpm);
+ DRM_DEBUG(" IPEIR: 0x%08x\n", error->ipeir);
+ DRM_DEBUG(" IPEHR: 0x%08x\n", error->ipehr);
+ DRM_DEBUG(" INSTDONE: 0x%08x\n", error->instdone);
+ DRM_DEBUG(" ACTHD: 0x%08x\n", error->acthd);
+ DRM_DEBUG(" DMA_FADD_P: 0x%08x\n", I915_READ(0x2078));
+ if (IS_I965G(dev)) {
+ DRM_DEBUG(" INSTPS: 0x%08x\n", error->instps);
+ DRM_DEBUG(" INSTDONE1: 0x%08x\n", error->instdone1);
+ }
+ drm_free(error, sizeof(*error), DRM_MEM_DRIVER);
+out:
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 eir = I915_READ(EIR);
+ u32 pipea_stats = I915_READ(PIPEASTAT);
+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
+
+ i915_capture_error_state(dev);
+
+ DRM_DEBUG("render error detected, EIR: 0x%08x\n",
+ eir);
+
+ if (IS_G4X(dev)) {
+ if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ DRM_DEBUG(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR_I965));
+ DRM_DEBUG(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR_I965));
+ DRM_DEBUG(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE_I965));
+ DRM_DEBUG(" INSTPS: 0x%08x\n",
+ I915_READ(INSTPS));
+ DRM_DEBUG(" INSTDONE1: 0x%08x\n",
+ I915_READ(INSTDONE1));
+ DRM_DEBUG(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ (void)I915_READ(IPEIR_I965);
+ }
+ if (eir & GM45_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ DRM_DEBUG("page table error\n");
+ DRM_DEBUG(" PGTBL_ER: 0x%08x\n",
+ pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ (void)I915_READ(PGTBL_ER);
+ }
+ }
+
+ if (IS_I9XX(dev)) {
+ if (eir & I915_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ DRM_DEBUG("page table error\n");
+ DRM_DEBUG("PGTBL_ER: 0x%08x\n",
+ pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ (void)I915_READ(PGTBL_ER);
+ }
+ }
+
+ if (eir & I915_ERROR_MEMORY_REFRESH) {
+ DRM_DEBUG("memory refresh error\n");
+ DRM_DEBUG("PIPEASTAT: 0x%08x\n",
+ pipea_stats);
+ DRM_DEBUG("PIPEBSTAT: 0x%08x\n",
+ pipeb_stats);
+ /* pipestat has already been acked */
+ }
+ if (eir & I915_ERROR_INSTRUCTION) {
+ DRM_DEBUG("instruction error\n");
+ DRM_DEBUG(" INSTPM: 0x%08x\n",
+ I915_READ(INSTPM));
+ if (!IS_I965G(dev)) {
+ u32 ipeir = I915_READ(IPEIR);
+
+ DRM_DEBUG(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR));
+ DRM_DEBUG(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR));
+ DRM_DEBUG(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE));
+ DRM_DEBUG(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD));
+ I915_WRITE(IPEIR, ipeir);
+ (void)I915_READ(IPEIR);
+ } else {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ DRM_DEBUG(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR_I965));
+ DRM_DEBUG(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR_I965));
+ DRM_DEBUG(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE_I965));
+ DRM_DEBUG(" INSTPS: 0x%08x\n",
+ I915_READ(INSTPS));
+ DRM_DEBUG(" INSTDONE1: 0x%08x\n",
+ I915_READ(INSTDONE1));
+ DRM_DEBUG(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ (void)I915_READ(IPEIR_I965);
+ }
+ }
+
+ I915_WRITE(EIR, eir);
+ (void)I915_READ(EIR);
+ eir = I915_READ(EIR);
+ if (eir) {
+ /*
+ * some errors might have become stuck,
+ * mask them.
+ */
+ DRM_DEBUG("EIR stuck: 0x%08x, masking\n", eir);
+ I915_WRITE(EMR, I915_READ(EMR) | eir);
+ I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ }
+
+}
+
+u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
+ return 0;
+ }
+
+ return I915_READ(reg);
+}
+
+irqreturn_t igdng_irq_handler(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int ret = IRQ_NONE;
+ u32 de_iir, gt_iir, de_ier;
+ u32 new_de_iir, new_gt_iir;
+ int vblank = 0;
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ (void)I915_READ(DEIER);
+
+ de_iir = I915_READ(DEIIR);
+ gt_iir = I915_READ(GTIIR);
+
+ for (;;) {
+ if (de_iir == 0 && gt_iir == 0)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ I915_WRITE(DEIIR, de_iir);
+ new_de_iir = I915_READ(DEIIR);
+ I915_WRITE(GTIIR, gt_iir);
+ new_gt_iir = I915_READ(GTIIR);
+
+ if (dev_priv->sarea_priv) {
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+ }
+
+ if (gt_iir & GT_USER_INTERRUPT) {
+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ }
+ if (de_iir & DE_PIPEA_VBLANK) {
+ vblank++;
+ drm_handle_vblank(dev, 0);
+ }
+
+ if (de_iir & DE_PIPEB_VBLANK) {
+ vblank++;
+ drm_handle_vblank(dev, 1);
+ }
+
+ de_iir = new_de_iir;
+ gt_iir = new_gt_iir;
+ }
+
+ I915_WRITE(DEIER, de_ier);
+ (void)I915_READ(DEIER);
+
+ return ret;
+}
+
+irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+{
+ drm_device_t *dev = (drm_device_t *) (void *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir;
+ u32 pipea_stats = 0, pipeb_stats = 0;
+ int vblank = 0;
+
+ if (IS_IGDNG(dev))
+ return igdng_irq_handler(dev);
+
+ iir = I915_READ(IIR);
+
+ if (iir == 0) {
+ return IRQ_NONE;
+ }
+
+ if (dev_priv->sarea_priv) {
+ if (dev_priv->hw_status_page)
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ }
+
+ I915_WRITE(IIR, iir);
+
+ (void) I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev);
+
+ if (iir & I915_USER_INTERRUPT) {
+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ }
+
+ if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
+ pipea_stats = I915_READ(PIPEASTAT);
+
+ /* The vblank interrupt gets enabled even if we didn't ask for
+ it, so make sure it's shut down again */
+ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
+ pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+ PIPE_VBLANK_INTERRUPT_ENABLE);
+ else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+ PIPE_VBLANK_INTERRUPT_STATUS))
+ {
+ vblank++;
+ drm_handle_vblank(dev, 0);
+ }
+
+ I915_WRITE(PIPEASTAT, pipea_stats);
+ }
+ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
+ pipeb_stats = I915_READ(PIPEBSTAT);
+
+ /* The vblank interrupt gets enabled even if we didn't ask for
+ it, so make sure it's shut down again */
+ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
+ pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+ PIPE_VBLANK_INTERRUPT_ENABLE);
+ else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+ PIPE_VBLANK_INTERRUPT_STATUS))
+ {
+ vblank++;
+ drm_handle_vblank(dev, 1);
+ }
+
+ I915_WRITE(PIPEBSTAT, pipeb_stats);
+ }
+ return IRQ_HANDLED;
+}
+
+int i915_emit_irq(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ i915_kernel_lost_context(dev);
+
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 1;
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+#if defined(__i386)
+ if (IS_GM45(dev)) {
+ BEGIN_LP_RING(3);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ ADVANCE_LP_RING();
+
+ (void) READ_BREADCRUMB(dev_priv);
+ BEGIN_LP_RING(2);
+ OUT_RING(0);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ } else {
+#endif /* __i386 */
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+#if defined(__i386)
+ }
+#endif /* __i386 */
+
+#if defined(__i386)
+ if (IS_I965GM(dev) || IS_IGDNG(dev) || IS_GM45(dev))
+#else
+ if (IS_I965GM(dev) || IS_IGDNG(dev))
+#endif /* __i386 */
+ {
+ (void) READ_BREADCRUMB(dev_priv);
+ BEGIN_LP_RING(2);
+ OUT_RING(0);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ (void) READ_BREADCRUMB(dev_priv);
+ }
+
+ return dev_priv->counter;
+}
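+
+/*
+ * The sequence emitted above is the driver's breadcrumb scheme:
+ * MI_STORE_DWORD_INDEX writes the new counter into the hardware status
+ * page at I915_BREADCRUMB_INDEX, and MI_USER_INTERRUPT raises an
+ * interrupt once the ring has executed that far, so waiters in
+ * i915_wait_irq() can compare READ_BREADCRUMB() with the value
+ * returned here.
+ */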
+
+void i915_user_irq_on(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ spin_lock(&dev_priv->user_irq_lock);
+ if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
+ if (IS_IGDNG(dev))
+ igdng_enable_irq(dev_priv, GT_USER_INTERRUPT, 1);
+ else
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+ spin_unlock(&dev_priv->user_irq_lock);
+}
+
+void i915_user_irq_off(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ spin_lock(&dev_priv->user_irq_lock);
+ if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
+ if (IS_IGDNG(dev))
+ igdng_disable_irq(dev_priv, GT_USER_INTERRUPT, 1);
+ else
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+ spin_unlock(&dev_priv->user_irq_lock);
+}
+
+
+static int i915_wait_irq(drm_device_t * dev, int irq_nr)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int ret = 0;
+ int wait_time = 0;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+waitmore:
+ wait_time++;
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (dev_priv->sarea_priv) {
+ dev_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+ return 0;
+ }
+ DRM_DEBUG("i915_wait_irq: irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv));
+ i915_user_irq_on(dev);
+ DRM_WAIT_ON(ret, &dev_priv->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ i915_user_irq_off(dev);
+
+ if (ret == EBUSY) {
+ if (wait_time > 5) {
+ DRM_DEBUG("%d: EBUSY -- rec: %d emitted: %d\n",
+ ret,
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ return ret;
+ }
+ goto waitmore;
+ }
+
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+ if (ret == EINTR) {
+ if (wait_time > 5) {
+ DRM_DEBUG("EINTR wait %d now %d", dev_priv->counter, READ_BREADCRUMB(dev_priv));
+ return ret;
+ }
+ goto waitmore;
+ }
+
+ return ret;
+}
+
+
+/* Needs the lock as it touches the ring.
+ */
+/*ARGSUSED*/
+int i915_irq_emit(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_emit_t emit;
+ int result;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_i915_irq_emit32_t irq_emit32;
+
+ DRM_COPYFROM_WITH_RETURN(&irq_emit32,
+ (drm_i915_irq_emit32_t __user *) data,
+ sizeof (drm_i915_irq_emit32_t));
+ emit.irq_seq = (int __user *)(uintptr_t)irq_emit32.irq_seq;
+ } else
+ DRM_COPYFROM_WITH_RETURN(&emit,
+ (drm_i915_irq_emit_t __user *) data, sizeof(emit));
+
+ spin_lock(&dev->struct_mutex);
+ result = i915_emit_irq(dev);
+ spin_unlock(&dev->struct_mutex);
+
+ if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return (EFAULT);
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+/*ARGSUSED*/
+int i915_irq_wait(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_wait_t irqwait;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&irqwait,
+ (drm_i915_irq_wait_t __user *) data, sizeof(irqwait));
+
+ return i915_wait_irq(dev, irqwait.irq_seq);
+}
+
+static void igdng_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vblank;
+
+ if (pipe == 0)
+ vblank = DE_PIPEA_VBLANK;
+ else
+ vblank = DE_PIPEB_VBLANK;
+
+ if ((dev_priv->de_irq_enable_reg & vblank) == 0) {
+ igdng_enable_irq(dev_priv, vblank, 0);
+ dev_priv->de_irq_enable_reg |= vblank;
+ I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
+ (void) I915_READ(DEIER);
+ }
+}
+
+static void igdng_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vblank;
+
+ if (pipe == 0)
+ vblank = DE_PIPEA_VBLANK;
+ else
+ vblank = DE_PIPEB_VBLANK;
+
+ if ((dev_priv->de_irq_enable_reg & vblank) != 0) {
+ igdng_disable_irq(dev_priv, vblank, 0);
+ dev_priv->de_irq_enable_reg &= ~vblank;
+ I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
+ (void) I915_READ(DEIER);
+ }
+}
+
+int i915_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 pipeconf;
+
+ pipeconf = I915_READ(pipeconf_reg);
+ if (!(pipeconf & PIPEACONF_ENABLE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (IS_IGDNG(dev))
+ igdng_enable_vblank(dev, pipe);
+ else if (IS_I965G(dev))
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ else
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+ return 0;
+}
+
+void i915_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (IS_IGDNG(dev))
+ igdng_disable_vblank(dev, pipe);
+ else
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+/* Set the vblank monitor pipe
+ */
+/*ARGSUSED*/
+int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return (-EINVAL);
+ }
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t pipe;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&pipe, (drm_i915_vblank_pipe_t __user *)data, sizeof (pipe));
+
+ pipe.pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+/*ARGSUSED*/
+int i915_vblank_swap(DRM_IOCTL_ARGS)
+{
+ /* The delayed swap mechanism was fundamentally racy, and has been
+ * removed. The model was that the client requested a delayed flip/swap
+ * from the kernel, then waited for vblank before continuing to perform
+ * rendering. The problem was that the kernel might wake the client
+ * up before it dispatched the vblank swap (since the lock has to be
+ * held while touching the ringbuffer), in which case the client would
+ * clear and start the next frame before the swap occurred, and
+ * flicker would occur in addition to likely missing the vblank.
+ *
+ * In the absence of this ioctl, userland falls back to a correct path
+ * of waiting for a vblank, then dispatching the swap on its own.
+ * Context switching to userland and back is plenty fast enough for
+ * meeting the requirements of vblank swapping.
+ */
+ return -EINVAL;
+
+}
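+
+/*
+ * A rough sketch of the userland fallback described above, using the
+ * generic libdrm vblank-wait interface (illustrative only; real client
+ * code varies). The client blocks for the next vblank, then issues its
+ * own swap:
+ *
+ *	drmVBlank vbl;
+ *	vbl.request.type = DRM_VBLANK_RELATIVE;
+ *	vbl.request.sequence = 1;
+ *	(void) drmWaitVBlank(fd, &vbl);
+ *	... dispatch the swap from the client ...
+ */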
+
+/* drm_dma.h hooks */
+
+static void igdng_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ I915_WRITE(HWSTAM, 0xeffe);
+
+ /* XXX hotplug from PCH */
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ (void) I915_READ(DEIER);
+
+ /* and GT */
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ (void) I915_READ(GTIER);
+}
+
+static int igdng_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ /* enable the kinds of interrupts that are always enabled */
+ u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
+ u32 render_mask = GT_USER_INTERRUPT;
+
+ dev_priv->irq_mask_reg = ~display_mask;
+ dev_priv->de_irq_enable_reg = display_mask;
+
+ /* should always be able to generate an irq */
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ (void) I915_READ(DEIIR);
+ I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+ I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
+ (void) I915_READ(DEIER);
+
+ /* the user interrupt should be enabled, but starts masked */
+ dev_priv->gt_irq_mask_reg = 0xffffffff;
+ dev_priv->gt_irq_enable_reg = render_mask;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ (void) I915_READ(GTIIR);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
+ (void) I915_READ(GTIER);
+
+ return 0;
+}
+
+static void igdng_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+}
+
+int i915_driver_irq_preinstall(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!dev_priv->mmio_map)
+ return -EINVAL;
+
+ if (IS_IGDNG(dev)) {
+ igdng_irq_preinstall(dev);
+ return 0;
+ }
+
+ I915_WRITE16(HWSTAM, 0xeffe);
+ I915_WRITE(PIPEASTAT, 0);
+ I915_WRITE(PIPEBSTAT, 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE16(IER, 0x0);
+ (void) I915_READ(IER);
+
+ return 0;
+}
+
+void i915_driver_irq_postinstall(drm_device_t * dev)
+{
+ int error_mask;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ if (IS_IGDNG(dev)) {
+ (void) igdng_irq_postinstall(dev);
+ DRM_INIT_WAITQUEUE(&dev_priv->irq_queue, DRM_INTR_PRI(dev));
+ return;
+ }
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ /*
+ * Enable some error detection, note the instruction error mask
+ * bit is reserved, so we leave it masked.
+ */
+ if (IS_G4X(dev)) {
+ error_mask = ~(GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH);
+ } else {
+ error_mask = ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+ }
+ I915_WRITE(EMR, error_mask);
+
+ /* Disable pipe interrupt enables, clear pending pipe status */
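+ /* (0x8000ffff keeps the write-1-to-clear status bits 15:0, so this
+ * write acks pending events while zeroing the enable bits) */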
+ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
+ I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+ (void) I915_READ(PIPEASTAT);
+ (void) I915_READ(PIPEBSTAT);
+ /* Clear pending interrupt status */
+ I915_WRITE(IIR, I915_READ(IIR));
+
+ (void) I915_READ(IIR);
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+ (void) I915_READ(IER);
+
+ DRM_INIT_WAITQUEUE(&dev_priv->irq_queue, DRM_INTR_PRI(dev));
+
+ return;
+}
+
+void i915_driver_irq_uninstall(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ if ((!dev_priv) || (dev->irq_enabled == 0))
+ return;
+
+ dev_priv->vblank_pipe = 0;
+
+ if (IS_IGDNG(dev)) {
+ igdng_irq_uninstall(dev);
+ DRM_FINI_WAITQUEUE(&dev_priv->irq_queue);
+ return;
+ }
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PIPEASTAT, 0);
+ I915_WRITE(PIPEBSTAT, 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
+ I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+ I915_WRITE(IIR, I915_READ(IIR));
+
+ DRM_FINI_WAITQUEUE(&dev_priv->irq_queue);
+}
diff --git a/usr/src/uts/intel/io/drm/i915_mem.c b/usr/src/uts/intel/io/drm/i915_mem.c
new file mode 100644
index 0000000..445bfa1
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/i915_mem.c
@@ -0,0 +1,425 @@
+/* BEGIN CSTYLED */
+
+/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/* This memory manager is integrated into the global/local lru
+ * mechanisms used by the clients. Specifically, it operates by
+ * setting the 'in_use' fields of the global LRU to indicate whether
+ * this region is privately allocated to a client.
+ *
+ * This does require the client to actually respect that field.
+ *
+ * Currently no effort is made to allocate 'private' memory in any
+ * clever way - the LRU information isn't used to determine which
+ * block to allocate, and the ring is drained prior to allocations --
+ * in other words allocation is expensive.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_tex_region_t *list;
+ unsigned shift, nr;
+ unsigned start;
+ unsigned end;
+ unsigned i;
+ int age;
+
+ shift = dev_priv->tex_lru_log_granularity;
+ nr = I915_NR_TEX_REGIONS;
+
+ start = p->start >> shift;
+ end = (p->start + p->size - 1) >> shift;
+
+ age = ++sarea_priv->texAge;
+ list = sarea_priv->texList;
+
+ /* Mark the regions with the new flag and update their age. Move
+ * them to head of list to preserve LRU semantics.
+ */
+ for (i = start; i <= end; i++) {
+ list[i].in_use = (unsigned char)in_use;
+ list[i].age = age;
+
+ /* remove_from_list(i)
+ */
+ list[(unsigned)list[i].next].prev = list[i].prev;
+ list[(unsigned)list[i].prev].next = list[i].next;
+
+ /* insert_at_head(list, i)
+ */
+ list[i].prev = (unsigned char)nr;
+ list[i].next = list[nr].next;
+ list[(unsigned)list[nr].next].prev = (unsigned char)i;
+ list[nr].next = (unsigned char)i;
+ }
+}
+
+/* Very simple allocator for agp memory, working on a static range
+ * already mapped into each client's address space.
+ */
+
+static struct mem_block *split_block(struct mem_block *p, int start, int size, drm_file_t *fpriv)
+{
+ /* Maybe cut off the start of an existing block */
+ if (start > p->start) {
+ struct mem_block *newblock =
+ drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+ if (!newblock)
+ goto out;
+ newblock->start = start;
+ newblock->size = p->size - (start - p->start);
+ newblock->filp = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* Maybe cut off the end of an existing block */
+ if (size < p->size) {
+ struct mem_block *newblock =
+ drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+ if (!newblock)
+ goto out;
+ newblock->start = start + size;
+ newblock->size = p->size - size;
+ newblock->filp = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size = size;
+ }
+
+ out:
+ /* Our block is in the middle */
+ p->filp = fpriv;
+ return (p);
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, int size,
+ int align2, drm_file_t *fpriv)
+{
+ struct mem_block *p;
+ int mask = (1 << align2) - 1;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ int start = (p->start + mask) & ~mask;
+ if (p->filp == NULL && start + size <= p->start + p->size)
+ return split_block(p, start, size, fpriv);
+ }
+
+ return NULL;
+}
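+
+/*
+ * Example of the alignment arithmetic above: with align2 = 12 (4KB),
+ * mask = 0xfff, so a free block starting at 0x1234 is treated as
+ * starting at (0x1234 + 0xfff) & ~0xfff = 0x2000 for the fit test.
+ */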
+
+static struct mem_block *find_block(struct mem_block *heap, int start)
+{
+ struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next)
+ if (p->start == start)
+ return (p);
+
+ return (NULL);
+}
+
+struct mem_block *find_block_by_proc(struct mem_block *heap, drm_file_t *fpriv)
+{
+ struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next)
+ if (p->filp == fpriv)
+ return (p);
+
+ return (NULL);
+}
+
+void free_block(struct mem_block *p)
+{
+ p->filp = NULL;
+
+ /* Assumes a single contiguous range. Needs a special filp in
+ * 'heap' to stop it being subsumed.
+ */
+ if (p->next->filp == NULL) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+ }
+
+ if (p->prev->filp == NULL) {
+ struct mem_block *q = p->prev;
+ q->size += p->size;
+ q->next = p->next;
+ q->next->prev = q;
+ drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
+ }
+}
+
+/* Initialize. How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
+
+ if (!blocks)
+ return (ENOMEM);
+
+ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
+ if (!*heap) {
+ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
+ return (ENOMEM);
+ }
+
+ blocks->start = start;
+ blocks->size = size;
+ blocks->filp = NULL;
+ blocks->next = blocks->prev = *heap;
+
+ (void) memset(*heap, 0, sizeof(**heap));
+ (*heap)->filp = (drm_file_t *) - 1;
+ (*heap)->next = (*heap)->prev = blocks;
+ return (0);
+}
+
+/* Free all blocks associated with the releasing file.
+ */
+void i915_mem_release(drm_device_t * dev, drm_file_t *fpriv, struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap || !heap->next)
+ return;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ if (p->filp == fpriv) {
+ p->filp = NULL;
+ mark_block(dev, p, 0);
+ }
+ }
+
+ /* Assumes a single contiguous range. Needs a special filp in
+ * 'heap' to stop it being subsumed.
+ */
+ for (p = heap->next; p != heap; p = p->next) {
+ while (p->filp == NULL && p->next->filp == NULL) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+ }
+ }
+}
+
+/* Shutdown.
+ */
+void i915_mem_takedown(struct mem_block **heap)
+{
+ struct mem_block *p;
+
+ if (!*heap)
+ return;
+
+ for (p = (*heap)->next; p != *heap;) {
+ struct mem_block *q = p;
+ p = p->next;
+ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+ }
+
+ drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
+ *heap = NULL;
+}
+
+struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
+{
+ switch (region) {
+ case I915_MEM_REGION_AGP:
+ return (&dev_priv->agp_heap);
+ default:
+ return (NULL);
+ }
+}
+
+/* IOCTL HANDLERS */
+
+/*ARGSUSED*/
+int i915_mem_alloc(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_mem_alloc_t alloc;
+ struct mem_block *block, **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_i915_mem_alloc32_t alloc32;
+
+ DRM_COPYFROM_WITH_RETURN(&alloc32, (void *)data, sizeof (alloc32));
+ alloc.region = alloc32.region;
+ alloc.alignment = alloc32.alignment;
+ alloc.size = alloc32.size;
+ alloc.region_offset = (int *)(uintptr_t)alloc32.region_offset;
+ } else
+ DRM_COPYFROM_WITH_RETURN(&alloc, (void *) data, sizeof(alloc));
+
+ heap = get_heap(dev_priv, alloc.region);
+ if (!heap || !*heap)
+ return (EFAULT);
+
+ /* Make things easier on ourselves: all allocations at least
+ * 4k aligned.
+ */
+ if (alloc.alignment < 12)
+ alloc.alignment = 12;
+
+ block = alloc_block(*heap, alloc.size, alloc.alignment, fpriv);
+
+ if (!block)
+ return (ENOMEM);
+
+ mark_block(dev, block, 1);
+
+ if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return (EFAULT);
+ }
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int i915_mem_free(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_mem_free_t memfree;
+ struct mem_block *block, **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&memfree, (void *)data, sizeof(memfree));
+
+ heap = get_heap(dev_priv, memfree.region);
+ if (!heap || !*heap)
+ return (EFAULT);
+
+ block = find_block(*heap, memfree.region_offset);
+ if (!block)
+ return (EFAULT);
+
+ if (block->filp != fpriv)
+ return (EPERM);
+
+ mark_block(dev, block, 0);
+ free_block(block);
+ return (0);
+}
+
+/*ARGSUSED*/
+int i915_mem_init_heap(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_mem_init_heap_t initheap;
+ struct mem_block **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&initheap, (void *)data, sizeof(initheap));
+
+ heap = get_heap(dev_priv, initheap.region);
+ if (!heap)
+ return (EFAULT);
+
+ if (*heap) {
+ DRM_ERROR("heap already initialized?");
+ return (EFAULT);
+ }
+
+ return init_heap(heap, initheap.start, initheap.size);
+}
+
+/*ARGSUSED*/
+int i915_mem_destroy_heap(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_mem_destroy_heap_t destroyheap;
+ struct mem_block **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&destroyheap, (void *)data, sizeof(destroyheap));
+
+ heap = get_heap(dev_priv, destroyheap.region);
+ if (!heap) {
+ DRM_ERROR("get_heap failed");
+ return (EFAULT);
+ }
+
+ if (!*heap) {
+ DRM_ERROR("heap not initialized?");
+ return (EFAULT);
+ }
+
+ i915_mem_takedown(heap);
+ return (0);
+}
diff --git a/usr/src/uts/intel/io/drm/r300_cmdbuf.c b/usr/src/uts/intel/io/drm/r300_cmdbuf.c
new file mode 100644
index 0000000..bc2eb7b
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/r300_cmdbuf.c
@@ -0,0 +1,987 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc. 2002.
+ * Copyright (C) 2004 Nicolai Haehnle.
+ * All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Nicolai Haehnle <prefect_@gmx.net>
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drm.h"
+#include "radeon_drm.h"
+#include "drmP.h"
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#define R300_SIMULTANEOUS_CLIPRECTS 4
+
+/*
+ * Values for R300_RE_CLIPRECT_CNTL depending on the number of
+ * cliprects
+ */
+static const int r300_cliprect_cntl[4] = {
+ 0xAAAA,
+ 0xEEEE,
+ 0xFEFE,
+ 0xFFFE
+};
+
+/*
+ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
+ * buffer, starting with index n.
+ */
+static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf, int n)
+{
+ drm_clip_rect_t box;
+ int nr;
+ int i;
+ RING_LOCALS;
+
+ nr = cmdbuf->nbox - n;
+ if (nr > R300_SIMULTANEOUS_CLIPRECTS)
+ nr = R300_SIMULTANEOUS_CLIPRECTS;
+
+ DRM_DEBUG("%i cliprects\n", nr);
+
+ if (nr) {
+ BEGIN_RING(6 + nr * 2);
+ OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
+
+ for (i = 0; i < nr; ++i) {
+ if (DRM_COPY_FROM_USER_UNCHECKED
+ (&box, &cmdbuf->boxes[n + i], sizeof (box))) {
+ DRM_ERROR("copy cliprect faulted\n");
+ return (EFAULT);
+ }
+
+ box.x1 =
+ (box.x1 +
+ R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+ box.y1 =
+ (box.y1 +
+ R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+ box.x2 =
+ (box.x2 +
+ R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+ box.y2 =
+ (box.y2 +
+ R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+
+ OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
+ (box.y1 << R300_CLIPRECT_Y_SHIFT));
+ OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
+ (box.y2 << R300_CLIPRECT_Y_SHIFT));
+ }
+
+ OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
+
+ /*
+ * TODO/SECURITY: Force scissors to a safe value, otherwise
+ * the client might be able to trample over memory.
+ * The impact should be very limited, but I'd rather be safe
+ * than sorry.
+ */
+ OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
+ OUT_RING(0);
+ OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
+ ADVANCE_RING();
+ } else {
+ /*
+ * Why we allow zero cliprect rendering:
+ * There are some commands in a command buffer that must be
+ * submitted even when there are no cliprects, e.g. DMA buffer
+ * discard or state setting (though state setting could be
+ * avoided by simulating a loss of context).
+ *
+ * Now since the cmdbuf interface is so chaotic right now (and
+ * is bound to remain that way for a bit until things settle
+ * down), it is basically impossible to filter out the commands
+ * that are necessary and those that aren't.
+ *
+ * So I choose the safe way and don't do any filtering at all;
+ * instead, I simply set up the engine so that all rendering
+ * can't produce any fragments.
+ */
+ BEGIN_RING(2);
+ OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
+ ADVANCE_RING();
+ }
+
+ return (0);
+}
+
+static u8 r300_reg_flags[0x10000 >> 2];
+
+void
+r300_init_reg_flags(void)
+{
+ int i;
+ (void) memset(r300_reg_flags, 0, 0x10000 >> 2);
+#define ADD_RANGE_MARK(reg, count, mark) \
+ for (i = ((reg) >> 2); i < ((reg) >> 2) + (count); i++)\
+ r300_reg_flags[i] |= (mark);
+
+#define MARK_SAFE 1
+#define MARK_CHECK_OFFSET 2
+
+#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
+
+ /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
+ ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
+ ADD_RANGE(0x2080, 1);
+ ADD_RANGE(R300_SE_VTE_CNTL, 2);
+ ADD_RANGE(0x2134, 2);
+ ADD_RANGE(0x2140, 1);
+ ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
+ ADD_RANGE(0x21DC, 1);
+ ADD_RANGE(0x221C, 1);
+ ADD_RANGE(0x2220, 4);
+ ADD_RANGE(0x2288, 1);
+ ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
+ ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
+ ADD_RANGE(R300_GB_ENABLE, 1);
+ ADD_RANGE(R300_GB_MSPOS0, 5);
+ ADD_RANGE(R300_TX_CNTL, 1);
+ ADD_RANGE(R300_TX_ENABLE, 1);
+ ADD_RANGE(0x4200, 4);
+ ADD_RANGE(0x4214, 1);
+ ADD_RANGE(R300_RE_POINTSIZE, 1);
+ ADD_RANGE(0x4230, 3);
+ ADD_RANGE(R300_RE_LINE_CNT, 1);
+ ADD_RANGE(0x4238, 1);
+ ADD_RANGE(0x4260, 3);
+ ADD_RANGE(0x4274, 4);
+ ADD_RANGE(0x4288, 5);
+ ADD_RANGE(0x42A0, 1);
+ ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
+ ADD_RANGE(0x42B4, 1);
+ ADD_RANGE(R300_RE_CULL_CNTL, 1);
+ ADD_RANGE(0x42C0, 2);
+ ADD_RANGE(R300_RS_CNTL_0, 2);
+ ADD_RANGE(R300_RS_INTERP_0, 8);
+ ADD_RANGE(R300_RS_ROUTE_0, 8);
+ ADD_RANGE(0x43A4, 2);
+ ADD_RANGE(0x43E8, 1);
+ ADD_RANGE(R300_PFS_CNTL_0, 3);
+ ADD_RANGE(R300_PFS_NODE_0, 4);
+ ADD_RANGE(R300_PFS_TEXI_0, 64);
+ ADD_RANGE(0x46A4, 5);
+ ADD_RANGE(R300_PFS_INSTR0_0, 64);
+ ADD_RANGE(R300_PFS_INSTR1_0, 64);
+ ADD_RANGE(R300_PFS_INSTR2_0, 64);
+ ADD_RANGE(R300_PFS_INSTR3_0, 64);
+ ADD_RANGE(0x4BC0, 1);
+ ADD_RANGE(0x4BC8, 3);
+ ADD_RANGE(R300_PP_ALPHA_TEST, 2);
+ ADD_RANGE(0x4BD8, 1);
+ ADD_RANGE(R300_PFS_PARAM_0_X, 64);
+ ADD_RANGE(0x4E00, 1);
+ ADD_RANGE(R300_RB3D_CBLEND, 2);
+ ADD_RANGE(R300_RB3D_COLORMASK, 1);
+ ADD_RANGE(0x4E10, 3);
+ ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);
+ /* check offset */
+ ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
+ ADD_RANGE(0x4E50, 9);
+ ADD_RANGE(0x4E88, 1);
+ ADD_RANGE(0x4EA0, 2);
+ ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
+ ADD_RANGE(0x4F10, 4);
+ ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);
+ /* check offset */
+ ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
+ ADD_RANGE(0x4F28, 1);
+ ADD_RANGE(0x4F30, 2);
+ ADD_RANGE(0x4F44, 1);
+ ADD_RANGE(0x4F54, 1);
+
+ ADD_RANGE(R300_TX_FILTER_0, 16);
+ ADD_RANGE(R300_TX_FILTER1_0, 16);
+ ADD_RANGE(R300_TX_SIZE_0, 16);
+ ADD_RANGE(R300_TX_FORMAT_0, 16);
+ ADD_RANGE(R300_TX_PITCH_0, 16);
+ /* Texture offset is dangerous and needs more checking */
+ ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
+ ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
+ ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
+
+ /* Sporadic registers used as primitives are emitted */
+ ADD_RANGE(0x4f18, 1);
+ ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
+ ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
+ ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
+
+}
+
+static __inline__ int r300_check_range(unsigned reg, int count)
+{
+ int i;
+ if (reg & ~0xffff)
+ return (-1);
+ for (i = (reg >> 2); i < (reg >> 2) + count; i++)
+ if (r300_reg_flags[i] != MARK_SAFE)
+ return (1);
+ return (0);
+}
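+
+/*
+ * The (reg >> 2) indexing works because r300_reg_flags keeps one byte
+ * per 32-bit register across the 64KB register space; e.g. the
+ * ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET) entry above
+ * marks sixteen consecutive texture-offset registers as needing the
+ * offset validation in r300_emit_carefully_checked_packet0().
+ */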
+
+static inline int
+r300_emit_carefully_checked_packet0(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf, drm_r300_cmd_header_t header)
+{
+ int reg;
+ int sz;
+ int i;
+ int values[64];
+ RING_LOCALS;
+
+ sz = header.packet0.count;
+ reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+ if ((sz > 64) || (sz < 0)) {
+ DRM_ERROR("Cannot emit more than 64 values at a time "
+ "(reg=%04x sz=%d)\n", reg, sz);
+ return (EINVAL);
+ }
+ for (i = 0; i < sz; i++) {
+ values[i] = ((int *)(uintptr_t)cmdbuf->buf)[i];
+ switch (r300_reg_flags[(reg >> 2) + i]) {
+ case MARK_SAFE:
+ break;
+ case MARK_CHECK_OFFSET:
+ if (!RADEON_CHECK_OFFSET(dev_priv, (u32) values[i])) {
+ DRM_ERROR("Offset failed range check "
+ "(reg=%04x sz=%d)\n", reg, sz);
+ return (EINVAL);
+ }
+ break;
+ default:
+ DRM_ERROR("Register %04x failed check as flag=%02x\n",
+ reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
+ return (EINVAL);
+ }
+ }
+
+ BEGIN_RING(1 + sz);
+ OUT_RING(CP_PACKET0(reg, sz - 1));
+ OUT_RING_TABLE(values, sz);
+ ADVANCE_RING();
+
+ cmdbuf->buf += sz * 4;
+ cmdbuf->bufsz -= sz * 4;
+
+ return (0);
+}
+
+/*
+ * Emits a packet0 setting arbitrary registers.
+ * Called by r300_do_cp_cmdbuf.
+ *
+ * Note that checks are performed on contents and addresses of the registers
+ */
+static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_r300_cmd_header_t header)
+{
+ int reg;
+ int sz;
+ RING_LOCALS;
+
+ sz = header.packet0.count;
+ reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+ if (!sz)
+ return (0);
+
+ if (sz * 4 > cmdbuf->bufsz)
+ return (EINVAL);
+
+ if (reg + sz * 4 >= 0x10000) {
+ DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n",
+ reg, sz);
+ return (EINVAL);
+ }
+
+ if (r300_check_range(reg, sz)) {
+ /* go and check everything */
+ return (r300_emit_carefully_checked_packet0(dev_priv,
+ cmdbuf, header));
+ }
+ /*
+ * the rest of the data is safe to emit, whatever the values
+ * the user passed
+ */
+
+ BEGIN_RING(1 + sz);
+ OUT_RING(CP_PACKET0(reg, sz - 1));
+ OUT_RING_TABLE(cmdbuf->buf, sz);
+ ADVANCE_RING();
+
+ cmdbuf->buf += sz * 4;
+ cmdbuf->bufsz -= sz * 4;
+
+ return (0);
+}
+
+/*
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static inline int r300_emit_vpu(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf, drm_r300_cmd_header_t header)
+{
+ int sz;
+ int addr;
+ RING_LOCALS;
+
+ sz = header.vpu.count;
+ addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
+
+ if (!sz)
+ return (0);
+ if (sz * 16 > cmdbuf->bufsz)
+ return (EINVAL);
+
+ BEGIN_RING(5 + sz * 4);
+ /* Wait for VAP to come to its senses. */
+ /*
+ * There is no need to emit this wait more than once (only before
+ * the VAP is programmed), but that optimization is left for later.
+ */
+ OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
+ OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
+ OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
+ OUT_RING_TABLE(cmdbuf->buf, sz * 4);
+
+ ADVANCE_RING();
+
+ cmdbuf->buf += sz * 16;
+ cmdbuf->bufsz -= sz * 16;
+
+ return (0);
+}
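+
+/*
+ * Sizing note for the upload above: header.vpu.count is in 128-bit
+ * vectors, so each unit consumes 16 bytes of the command buffer and is
+ * emitted as 4 ring dwords -- hence the sz * 16 bounds check and the
+ * sz * 4 table write.
+ */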
+
+/*
+ * Emit a clear packet from userspace.
+ * Called by r300_emit_packet3.
+ */
+static inline int r300_emit_clear(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ RING_LOCALS;
+
+ if (8 * 4 > cmdbuf->bufsz)
+ return (EINVAL);
+
+ BEGIN_RING(10);
+ OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
+ OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
+ (1 << R300_PRIM_NUM_VERTICES_SHIFT));
+ OUT_RING_TABLE(cmdbuf->buf, 8);
+ ADVANCE_RING();
+
+ cmdbuf->buf += 8 * 4;
+ cmdbuf->bufsz -= 8 * 4;
+
+ return (0);
+}
+
+static inline int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf, u32 header)
+{
+ int count, i, k;
+#define MAX_ARRAY_PACKET 64
+ u32 payload[MAX_ARRAY_PACKET];
+ u32 narrays;
+ RING_LOCALS;
+
+ count = (header >> 16) & 0x3fff;
+
+ if ((count + 1) > MAX_ARRAY_PACKET) {
+ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+ count);
+ return (EINVAL);
+ }
+ (void) memset(payload, 0, MAX_ARRAY_PACKET * 4);
+ (void) memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
+
+ /* carefully check packet contents */
+
+ narrays = payload[0];
+ k = 0;
+ i = 1;
+ while ((k < narrays) && (i < (count + 1))) {
+ i++; /* skip attribute field */
+ if (!RADEON_CHECK_OFFSET(dev_priv, payload[i])) {
+ DRM_ERROR("Offset failed range check (k=%d i=%d) "
+ "while processing 3D_LOAD_VBPNTR packet.\n",
+ k, i);
+ return (EINVAL);
+ }
+ k++;
+ i++;
+ if (k == narrays)
+ break;
+ /* have one more to process, they come in pairs */
+ if (!RADEON_CHECK_OFFSET(dev_priv, payload[i])) {
+ DRM_ERROR("Offset failed range check (k=%d i=%d) "
+ "while processing 3D_LOAD_VBPNTR packet.\n",
+ k, i);
+ return (EINVAL);
+ }
+ k++;
+ i++;
+ }
+ /* do the counts match what we expect ? */
+ if ((k != narrays) || (i != (count + 1))) {
+ DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet "
+ "(k=%d i=%d narrays=%d count+1=%d).\n",
+ k, i, narrays, count + 1);
+ return (EINVAL);
+ }
+
+ /* all clear, output packet */
+
+ BEGIN_RING(count + 2);
+ OUT_RING(header);
+ OUT_RING_TABLE(payload, count + 1);
+ ADVANCE_RING();
+
+ cmdbuf->buf += (count + 2) * 4;
+ cmdbuf->bufsz -= (count + 2) * 4;
+
+ return (0);
+}
+
+static inline int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ u32 *cmd = (u32 *)(uintptr_t)cmdbuf->buf;
+ int count, ret;
+ RING_LOCALS;
+
+ count = (cmd[0] >> 16) & 0x3fff;
+
+ if (cmd[0] & 0x8000) {
+ u32 offset;
+
+ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ offset = cmd[2] << 10;
+ ret = !RADEON_CHECK_OFFSET(dev_priv, offset);
+ if (ret) {
+ DRM_ERROR("Invalid bitblt first offset "
+ "is %08X\n", offset);
+ return (EINVAL);
+ }
+ }
+
+ if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ offset = cmd[3] << 10;
+ ret = !RADEON_CHECK_OFFSET(dev_priv, offset);
+ if (ret) {
+ DRM_ERROR("Invalid bitblt second offset "
+ "is %08X\n", offset);
+ return (EINVAL);
+ }
+
+ }
+ }
+
+ BEGIN_RING(count+2);
+ OUT_RING(cmd[0]);
+ OUT_RING_TABLE((cmdbuf->buf + 4), count + 1);
+ ADVANCE_RING();
+
+ cmdbuf->buf += (count+2)*4;
+ cmdbuf->bufsz -= (count+2)*4;
+
+ return (0);
+}
+
+
+static inline int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ u32 *cmd = (u32 *)(uintptr_t)cmdbuf->buf;
+ int count, ret;
+ RING_LOCALS;
+
+ count = (cmd[0]>>16) & 0x3fff;
+
+ if ((cmd[1] & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+ return (EINVAL);
+ }
+ ret = !RADEON_CHECK_OFFSET(dev_priv, cmd[2]);
+ if (ret) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ return (EINVAL);
+ }
+
+ BEGIN_RING(count+2);
+ OUT_RING(cmd[0]);
+ OUT_RING_TABLE(cmdbuf->buf + 4, count + 1);
+ ADVANCE_RING();
+
+ cmdbuf->buf += (count+2)*4;
+ cmdbuf->bufsz -= (count+2)*4;
+
+ return (0);
+}
+
+
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ u32 header;
+ int count;
+ RING_LOCALS;
+
+ if (4 > cmdbuf->bufsz)
+ return (EINVAL);
+
+ /*
+ * FIXME: this simply emits a packet without much checking.
+ * We need to be smarter.
+ */
+
+ /* obtain first word - actual packet3 header */
+ header = *(u32 *)(uintptr_t)cmdbuf->buf;
+
+ /* Is it packet 3 ? */
+ if ((header >> 30) != 0x3) {
+ DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
+ return (EINVAL);
+ }
+
+ count = (header >> 16) & 0x3fff;
+
+ /* Check again now that we know how much data to expect */
+ if ((count + 2) * 4 > cmdbuf->bufsz) {
+ DRM_ERROR("Expected packet3 of length %d but have only "
+ "%d bytes left\n", (count + 2) * 4, cmdbuf->bufsz);
+ return (EINVAL);
+ }
+
+ /* Is it a packet type we know about ? */
+ switch (header & 0xff00) {
+ case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
+ return (r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header));
+
+ case RADEON_CNTL_BITBLT_MULTI:
+ return (r300_emit_bitblt_multi(dev_priv, cmdbuf));
+
+ case RADEON_CP_INDX_BUFFER:
+ /*
+ * DRAW_INDX_2 without INDX_BUFFER seems to lock
+ * up the GPU.
+ */
+ return (r300_emit_indx_buffer(dev_priv, cmdbuf));
+
+ case RADEON_CP_3D_DRAW_IMMD_2:
+ /* triggers drawing using in-packet vertex data */
+ case RADEON_CP_3D_DRAW_VBUF_2:
+ /* triggers drawing of vertex buffers setup elsewhere */
+ case RADEON_CP_3D_DRAW_INDX_2:
+ /* triggers drawing using indices to vertex buffer */
+ case RADEON_WAIT_FOR_IDLE:
+ case RADEON_CP_NOP:
+ /* these packets are safe */
+ break;
+ default:
+ DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
+ return (EINVAL);
+ }
+
+ BEGIN_RING(count + 2);
+ OUT_RING(header);
+ OUT_RING_TABLE((cmdbuf->buf + 4), count + 1);
+ ADVANCE_RING();
+
+ cmdbuf->buf += (count + 2) * 4;
+ cmdbuf->bufsz -= (count + 2) * 4;
+
+ return (0);
+}
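+
+/*
+ * Added illustration (not from the original code): a packet3 header as
+ * parsed above decomposes, for the hypothetical value 0xC0022F00, as:
+ *
+ * (0xC0022F00 >> 30) == 0x3 packet type 3
+ * (0xC0022F00 >> 16) & 0x3fff == 2 count; count + 2 = 4 dwords total
+ * 0xC0022F00 & 0xff00 == 0x2F00 opcode; this would match the
+ * RADEON_3D_LOAD_VBPNTR case above, assuming that constant's usual
+ * value of 0x2f00 from the radeon headers.
+ */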
+
+/*
+ * Emit a rendering packet3 from userspace.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf, drm_r300_cmd_header_t header)
+{
+ int n;
+ int ret;
+ char *orig_buf = cmdbuf->buf;
+ int orig_bufsz = cmdbuf->bufsz;
+
+ /*
+ * This is a do-while loop so that we run the interior at least once,
+ * even if cmdbuf->nbox is 0. See r300_emit_cliprects for the rationale.
+ */
+ n = 0;
+ do {
+ if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
+ ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
+ if (ret)
+ return (ret);
+
+ cmdbuf->buf = orig_buf;
+ cmdbuf->bufsz = orig_bufsz;
+ }
+
+ switch (header.packet3.packet) {
+ case R300_CMD_PACKET3_CLEAR:
+ DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
+ ret = r300_emit_clear(dev_priv, cmdbuf);
+ if (ret) {
+ DRM_ERROR("r300_emit_clear failed\n");
+ return (ret);
+ }
+ break;
+
+ case R300_CMD_PACKET3_RAW:
+ DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
+ ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
+ if (ret) {
+ DRM_ERROR("r300_emit_raw_packet3 failed\n");
+ return (ret);
+ }
+ break;
+
+ default:
+ DRM_ERROR("bad packet3 type %i at %p\n",
+ header.packet3.packet,
+ cmdbuf->buf - sizeof (header));
+ return (EINVAL);
+ }
+
+ n += R300_SIMULTANEOUS_CLIPRECTS;
+ } while (n < cmdbuf->nbox);
+
+ return (0);
+}
+
+/*
+ * Some of the R300 chips seem to be extremely touchy about the two registers
+ * that are configured in r300_pacify.
+ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
+ * sends a command buffer that contains only state setting commands and a
+ * vertex program/parameter upload sequence, this will eventually lead to a
+ * lockup, unless the sequence is bracketed by calls to r300_pacify.
+ * So we should take great care to *always* call r300_pacify before
+ * *anything* 3D related, and again afterwards. This is what the
+ * call bracket in r300_do_cp_cmdbuf is for.
+ */
+
+/*
+ * Emit the sequence to pacify R300.
+ */
+static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
+{
+ RING_LOCALS;
+
+ BEGIN_RING(6);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(0xa);
+ OUT_RING(CP_PACKET0(0x4f18, 0));
+ OUT_RING(0x3);
+ OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
+ OUT_RING(0x0);
+ ADVANCE_RING();
+}
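+
+/*
+ * Added cross-reference: the 0xa written to R300_RB3D_DSTCACHE_CTLSTAT
+ * above corresponds to R300_RB3D_DSTCACHE_0A in r300_reg.h ("set to 0A
+ * before 3D operations"); register 0x4f18 is written by raw address and
+ * has no symbolic name in this import.
+ */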
+
+/*
+ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
+ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
+ * be careful about how this function is called.
+ */
+static void r300_discard_buffer(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+
+ buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+ buf->pending = 1;
+ buf->used = 0;
+}
+
+static int r300_scratch(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_r300_cmd_header_t header)
+{
+ u32 *ref_age_base;
+ u32 i, buf_idx, h_pending;
+ RING_LOCALS;
+
+ if (cmdbuf->bufsz < sizeof (uint64_t) +
+ header.scratch.n_bufs * sizeof (buf_idx)) {
+ return (EINVAL);
+ }
+
+ if (header.scratch.reg >= 5) {
+ return (EINVAL);
+ }
+
+ dev_priv->scratch_ages[header.scratch.reg]++;
+
+ ref_age_base = (u32 *)(uintptr_t)*((uint64_t *)(uintptr_t)cmdbuf->buf);
+
+ cmdbuf->buf += sizeof (uint64_t);
+ cmdbuf->bufsz -= sizeof (uint64_t);
+
+ for (i = 0; i < header.scratch.n_bufs; i++) {
+ buf_idx = *(u32 *)(uintptr_t)cmdbuf->buf;
+ buf_idx *= 2; /* 8 bytes per buf */
+
+ if (DRM_COPY_TO_USER(ref_age_base + buf_idx,
+ &dev_priv->scratch_ages[header.scratch.reg],
+ sizeof (u32))) {
+ return (EINVAL);
+ }
+
+ if (DRM_COPY_FROM_USER(&h_pending,
+ ref_age_base + buf_idx + 1, sizeof (u32))) {
+ return (EINVAL);
+ }
+
+ if (h_pending == 0) {
+ return (EINVAL);
+ }
+
+ h_pending--;
+
+ if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1,
+ &h_pending, sizeof (u32))) {
+ return (EINVAL);
+ }
+
+ cmdbuf->buf += sizeof (buf_idx);
+ cmdbuf->bufsz -= sizeof (buf_idx);
+ }
+
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
+ OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
+ ADVANCE_RING();
+
+ return (0);
+}
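+
+/*
+ * Added sketch (an assumption, for illustration only): the loop above
+ * treats the user-supplied ref_age_base area as an array of u32 pairs
+ * indexed by buf_idx * 2, i.e. conceptually:
+ *
+ * struct { u32 age; u32 pending; } ref_ages[];
+ *
+ * where age is overwritten with the current scratch age and pending is
+ * read, decremented, and written back.
+ */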
+
+/*
+ * Parses and validates a user-supplied command buffer and emits appropriate
+ * commands on the DMA ring buffer.
+ * Called by the ioctl handler function radeon_cp_cmdbuf.
+ */
+/*ARGSUSED*/
+int
+r300_do_cp_cmdbuf(drm_device_t *dev,
+ drm_file_t *fpriv, drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf = NULL;
+ int emit_dispatch_age = 0;
+ int ret = 0;
+
+ DRM_DEBUG("\n");
+
+ /*
+ * See the comment above r300_emit_begin3d for why this call
+ * must be here, and what the cleanup gotos are for.
+ */
+ r300_pacify(dev_priv);
+
+ if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
+ ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
+ if (ret)
+ goto cleanup;
+ }
+
+ while (cmdbuf->bufsz >= sizeof (drm_r300_cmd_header_t)) {
+ int idx;
+ drm_r300_cmd_header_t header;
+
+ header.u = *(unsigned int *)(uintptr_t)cmdbuf->buf;
+
+ cmdbuf->buf += sizeof (header);
+ cmdbuf->bufsz -= sizeof (header);
+
+ switch (header.header.cmd_type) {
+ case R300_CMD_PACKET0:
+ DRM_DEBUG("R300_CMD_PACKET0\n");
+ ret = r300_emit_packet0(dev_priv, cmdbuf, header);
+ if (ret) {
+ DRM_ERROR("r300_emit_packet0 failed\n");
+ goto cleanup;
+ }
+ break;
+
+ case R300_CMD_VPU:
+ DRM_DEBUG("R300_CMD_VPU\n");
+ ret = r300_emit_vpu(dev_priv, cmdbuf, header);
+ if (ret) {
+ DRM_ERROR("r300_emit_vpu failed\n");
+ goto cleanup;
+ }
+ break;
+
+ case R300_CMD_PACKET3:
+ DRM_DEBUG("R300_CMD_PACKET3\n");
+ ret = r300_emit_packet3(dev_priv, cmdbuf, header);
+ if (ret) {
+ DRM_ERROR("r300_emit_packet3 failed\n");
+ goto cleanup;
+ }
+ break;
+
+ case R300_CMD_END3D:
+ DRM_DEBUG("R300_CMD_END3D\n");
+ /*
+ * TODO:
+ * Ideally the userspace driver should not need to issue
+ * this call, i.e. the drm driver should issue it
+ * automatically and prevent lockups. In practice, we
+ * do not understand why this call is needed and what
+ * it does (except for some vague guesses that it has
+ * to do with cache coherence), and so the userspace
+ * driver does it.
+ *
+ * Once we are sure which usages prevent lockups, the code
+ * could be moved into the kernel and the userspace
+ * driver would no longer need to use this command.
+ *
+ * Note that issuing this command does not hurt anything
+ * except, possibly, performance.
+ */
+ r300_pacify(dev_priv);
+ break;
+
+ case R300_CMD_CP_DELAY:
+ /* simple enough, we can do it here */
+ DRM_DEBUG("R300_CMD_CP_DELAY\n");
+ {
+ int i;
+ RING_LOCALS;
+
+ BEGIN_RING(header.delay.count);
+ for (i = 0; i < header.delay.count; i++)
+ OUT_RING(RADEON_CP_PACKET2);
+ ADVANCE_RING();
+ }
+ break;
+
+ case R300_CMD_DMA_DISCARD:
+ DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+ idx = header.dma.buf_idx;
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ ret = EINVAL;
+ goto cleanup;
+ }
+
+ buf = dma->buflist[idx];
+ if (buf->filp != fpriv || buf->pending) {
+ DRM_ERROR("bad buffer %p %p %d\n",
+ buf->filp, fpriv, buf->pending);
+ ret = EINVAL;
+ goto cleanup;
+ }
+
+ emit_dispatch_age = 1;
+ r300_discard_buffer(dev, buf);
+ break;
+
+ case R300_CMD_WAIT:
+ /* simple enough, we can do it here */
+ DRM_DEBUG("R300_CMD_WAIT\n");
+ if (header.wait.flags == 0)
+ break; /* nothing to do */
+
+ {
+ RING_LOCALS;
+
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING((header.wait.flags & 0xf) << 14);
+ ADVANCE_RING();
+ }
+ break;
+
+ case R300_CMD_SCRATCH:
+ DRM_DEBUG("R300_CMD_SCRATCH\n");
+ ret = r300_scratch(dev_priv, cmdbuf, header);
+ if (ret) {
+ DRM_ERROR("r300_scratch failed\n");
+ goto cleanup;
+ }
+ break;
+
+ default:
+ DRM_ERROR("bad cmd_type %i at %p\n",
+ header.header.cmd_type,
+ cmdbuf->buf - sizeof (header));
+ ret = EINVAL;
+ goto cleanup;
+ }
+ }
+
+ DRM_DEBUG("END\n");
+
+cleanup:
+ r300_pacify(dev_priv);
+
+ /*
+ * We emit the vertex buffer age here, outside the pacifier "brackets"
+ * for two reasons:
+ * (1) This may coalesce multiple age emissions into a single one and
+ * (2) more importantly, some chips lock up hard when scratch registers
+ * are written inside the pacifier bracket.
+ */
+ if (emit_dispatch_age) {
+ RING_LOCALS;
+
+ /* Emit the vertex buffer age */
+ BEGIN_RING(2);
+ RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
+ ADVANCE_RING();
+ }
+
+ COMMIT_RING();
+
+ return (ret);
+}
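+
+/*
+ * Added sketch (illustration only): the command buffer consumed by the
+ * loop above is a sequence of drm_r300_cmd_header_t words, each followed
+ * by its payload, e.g.:
+ *
+ * [PACKET0 hdr][register values ...]
+ * [VPU hdr][vertex program dwords ...]
+ * [PACKET3 hdr][CP packet3 dwords ...]
+ * [DMA_DISCARD hdr]
+ * [END3D hdr]
+ */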
diff --git a/usr/src/uts/intel/io/drm/r300_reg.h b/usr/src/uts/intel/io/drm/r300_reg.h
new file mode 100644
index 0000000..8f30b80
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/r300_reg.h
@@ -0,0 +1,1516 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifndef __R300_REG_H_
+#define __R300_REG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Copyright (C) 2004-2005 Nicolai Haehnle et al.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#define R300_MC_INIT_MISC_LAT_TIMER 0x180
+#define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
+#define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4
+#define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8
+#define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12
+#define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16
+#define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20
+#define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24
+#define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28
+
+
+#define R300_MC_INIT_GFX_LAT_TIMER 0x154
+#define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0
+#define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4
+#define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8
+#define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12
+#define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16
+#define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20
+#define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24
+#define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
+
+/*
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ *
+ * I am fairly certain that they are correct unless stated otherwise in
+ * comments.
+ */
+
+#define R300_SE_VPORT_XSCALE 0x1D98
+#define R300_SE_VPORT_XOFFSET 0x1D9C
+#define R300_SE_VPORT_YSCALE 0x1DA0
+#define R300_SE_VPORT_YOFFSET 0x1DA4
+#define R300_SE_VPORT_ZSCALE 0x1DA8
+#define R300_SE_VPORT_ZOFFSET 0x1DAC
+
+
+/*
+ * This register is written directly and also starts the data
+ * section in many 3d CP_PACKET3's.
+ */
+#define R300_VAP_VF_CNTL 0x2084
+
+#define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
+#define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
+#define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
+#define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
+#define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
+#define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
+#define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
+#define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
+#define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
+#define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
+#define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
+#define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
+
+#define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
+ /* State based - direct writes to registers trigger vertex generation */
+#define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
+#define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
+#define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
+#define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
+
+ /* I don't think I saw these three used. */
+#define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
+#define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
+#define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
+
+/* index size - when not set the indices are assumed to be 16 bit */
+#define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
+ /* number of vertices */
+#define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
+
+/* BEGIN: Wild guesses */
+#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
+#define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
+#define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
+#define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
+#define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
+#define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
+#define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
+
+#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
+#define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+#define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+#define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+#define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+#define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+#define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+#define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+#define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+/* END */
+
+#define R300_SE_VTE_CNTL 0x20b0
+#define R300_VPORT_X_SCALE_ENA 0x00000001
+#define R300_VPORT_X_OFFSET_ENA 0x00000002
+#define R300_VPORT_Y_SCALE_ENA 0x00000004
+#define R300_VPORT_Y_OFFSET_ENA 0x00000008
+#define R300_VPORT_Z_SCALE_ENA 0x00000010
+#define R300_VPORT_Z_OFFSET_ENA 0x00000020
+#define R300_VTX_XY_FMT 0x00000100
+#define R300_VTX_Z_FMT 0x00000200
+#define R300_VTX_W0_FMT 0x00000400
+#define R300_VTX_W0_NORMALIZE 0x00000800
+#define R300_VTX_ST_DENORMALIZED 0x00001000
+
+/* BEGIN: Vertex data assembly - lots of uncertainties */
+/* gap */
+/*
+ * Where do we get our vertex data?
+ *
+ * Vertex data comes either from immediate mode registers or from
+ * vertex arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
+
+#define R300_VAP_INPUT_ROUTE_0_0 0x2150
+#define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
+#define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
+#define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
+#define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
+#define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
+#define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_END (1 << 13)
+#define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
+#define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
+#define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
+#define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_0_1 0x2154
+#define R300_VAP_INPUT_ROUTE_0_2 0x2158
+#define R300_VAP_INPUT_ROUTE_0_3 0x215C
+#define R300_VAP_INPUT_ROUTE_0_4 0x2160
+#define R300_VAP_INPUT_ROUTE_0_5 0x2164
+#define R300_VAP_INPUT_ROUTE_0_6 0x2168
+#define R300_VAP_INPUT_ROUTE_0_7 0x216C
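+
+/*
+ * Added illustration (a sketch; several of these bits are marked GUESS
+ * above): a single four-component float input routed to vertex-processor
+ * register 0, ending the list, might be encoded as
+ *
+ * R300_INPUT_ROUTE_COMPONENTS_4 |
+ * (0 << R300_VAP_INPUT_ROUTE_IDX_SHIFT) |
+ * R300_VAP_INPUT_ROUTE_END | R300_INPUT_ROUTE_FLOAT
+ */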
+
+/* gap */
+/*
+ * Notes:
+ * - always set up to produce at least two attributes:
+ *   if vertex program uses only position, fglrx will set normal, too
+ * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal
+ */
+#define R300_VAP_INPUT_CNTL_0 0x2180
+#define R300_INPUT_CNTL_0_COLOR 0x00000001
+#define R300_VAP_INPUT_CNTL_1 0x2184
+#define R300_INPUT_CNTL_POS 0x00000001
+#define R300_INPUT_CNTL_NORMAL 0x00000002
+#define R300_INPUT_CNTL_COLOR 0x00000004
+#define R300_INPUT_CNTL_TC0 0x00000400
+#define R300_INPUT_CNTL_TC1 0x00000800
+#define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
+#define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
+#define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
+#define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
+#define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
+#define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
+
+/* gap */
+/*
+ * Words parallel to INPUT_ROUTE_0; all words that are active in
+ * INPUT_ROUTE_0 are set to a swizzling bit pattern, other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set the zw components of
+ * texture coordinates that have only two components.
+ */
+#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
+#define R300_INPUT_ROUTE_SELECT_X 0
+#define R300_INPUT_ROUTE_SELECT_Y 1
+#define R300_INPUT_ROUTE_SELECT_Z 2
+#define R300_INPUT_ROUTE_SELECT_W 3
+#define R300_INPUT_ROUTE_SELECT_ZERO 4
+#define R300_INPUT_ROUTE_SELECT_ONE 5
+#define R300_INPUT_ROUTE_SELECT_MASK 7
+#define R300_INPUT_ROUTE_X_SHIFT 0
+#define R300_INPUT_ROUTE_Y_SHIFT 3
+#define R300_INPUT_ROUTE_Z_SHIFT 6
+#define R300_INPUT_ROUTE_W_SHIFT 9
+#define R300_INPUT_ROUTE_ENABLE (15 << 12)
+#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
+#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
+#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
+#define R300_VAP_INPUT_ROUTE_1_4 0x21F0
+#define R300_VAP_INPUT_ROUTE_1_5 0x21F4
+#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
+#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
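+
+/*
+ * Added illustration: a plain xyzw swizzle with all four write enables
+ * set would be encoded as
+ *
+ * (R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT) |
+ * (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT) |
+ * (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT) |
+ * (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT) |
+ * R300_INPUT_ROUTE_ENABLE
+ *
+ * which works out to 0xF688.
+ */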
+
+/* END */
+
+/* gap */
+/*
+ * BEGIN: Upload vertex program and data
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas the
+ * difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
+#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
+#define R300_PVS_UPLOAD_PROGRAM 0x00000000
+#define R300_PVS_UPLOAD_PARAMETERS 0x00000200
+#define R300_PVS_UPLOAD_POINTSIZE 0x00000406
+/* gap */
+#define R300_VAP_PVS_UPLOAD_DATA 0x2208
+/* END */
+
+/* gap */
+/*
+ * I do not know the purpose of this register. However, I do know that
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ */
+#define R300_VAP_UNKNOWN_221C 0x221C
+#define R300_221C_NORMAL 0x00000000
+#define R300_221C_CLEAR 0x0001C000
+
+/* gap */
+/*
+ * Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
+
+/* Absolutely no clue what this register is about. */
+#define R300_VAP_UNKNOWN_2288 0x2288
+#define R300_2288_R300 0x00750000 /* -- nh */
+#define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
+
+/* gap */
+/*
+ * Addresses are relative to the vertex program instruction area of the
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program.
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+ * CNTL_1_UNKNOWN to the instruction where the last write to position
+ * takes place. Most likely this is used to skip the rest of the program
+ * in cases where a group of vertices is not visible.
+ * For some reason this "section" sometimes also accepts instructions
+ * that have no relationship with position calculations.
+ */
+#define R300_VAP_PVS_CNTL_1 0x22D0
+#define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
+#define R300_PVS_CNTL_1_POS_END_SHIFT 10
+#define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
+/* Addresses are relative to the vertex program parameters area. */
+#define R300_VAP_PVS_CNTL_2 0x22D4
+#define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
+#define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
+#define R300_VAP_PVS_CNTL_3 0x22D8
+#define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
+#define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
+
+/*
+ * The entire range from 0x2300 to 0x24AC inclusive seems to be used for
+ * immediate vertices.
+ */
+#define R300_VAP_VTX_COLOR_R 0x2464
+#define R300_VAP_VTX_COLOR_G 0x2468
+#define R300_VAP_VTX_COLOR_B 0x246C
+#define R300_VAP_VTX_POS_0_X_1 0x2490
+#define R300_VAP_VTX_POS_0_Y_1 0x2494
+#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2 0x24A0
+#define R300_VAP_VTX_POS_0_Y_2 0x24A4
+#define R300_VAP_VTX_POS_0_Z_2 0x24A8
+#define R300_VAP_VTX_END_OF_PKT 0x24AC
+
+/* gap */
+
+/*
+ * These are values from r300_reg/r300_reg.h - they are known to
+ * be correct and are here so we can use one register file instead
+ * of several
+ * - Vladimir
+ */
+#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
+#define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
+#define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
+#define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
+#define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
+#define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
+#define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
+#define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
+
+#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
+ /*
+ * Each of the following is 3 bits wide and specifies the number
+ * of components.
+ */
+#define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+#define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+#define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+#define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+#define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+#define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+#define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+#define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+
+/*
+ * UNK30 seems to enable a point-to-quad transformation on
+ * textures (or something closely related to that). This bit
+ * is rather fatal at the moment due to shortcomings on the
+ * pixel shader side.
+ */
+#define R300_GB_ENABLE 0x4008
+#define R300_GB_POINT_STUFF_ENABLE (1<<0)
+#define R300_GB_LINE_STUFF_ENABLE (1<<1)
+#define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
+#define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
+#define R300_GB_UNK30 (1<<30)
+ /* each of the following is 2 bits wide */
+#define R300_GB_TEX_REPLICATE 0
+#define R300_GB_TEX_ST 1
+#define R300_GB_TEX_STR 2
+#define R300_GB_TEX0_SOURCE_SHIFT 16
+#define R300_GB_TEX1_SOURCE_SHIFT 18
+#define R300_GB_TEX2_SOURCE_SHIFT 20
+#define R300_GB_TEX3_SOURCE_SHIFT 22
+#define R300_GB_TEX4_SOURCE_SHIFT 24
+#define R300_GB_TEX5_SOURCE_SHIFT 26
+#define R300_GB_TEX6_SOURCE_SHIFT 28
+#define R300_GB_TEX7_SOURCE_SHIFT 30
+
+/* MSPOS - positions for multisample antialiasing (?) */
+#define R300_GB_MSPOS0 0x4010
+ /* shifts - each of the fields is 4 bits */
+#define R300_GB_MSPOS0__MS_X0_SHIFT 0
+#define R300_GB_MSPOS0__MS_Y0_SHIFT 4
+#define R300_GB_MSPOS0__MS_X1_SHIFT 8
+#define R300_GB_MSPOS0__MS_Y1_SHIFT 12
+#define R300_GB_MSPOS0__MS_X2_SHIFT 16
+#define R300_GB_MSPOS0__MS_Y2_SHIFT 20
+#define R300_GB_MSPOS0__MSBD0_Y 24
+#define R300_GB_MSPOS0__MSBD0_X 28
+
+#define R300_GB_MSPOS1 0x4014
+#define R300_GB_MSPOS1__MS_X3_SHIFT 0
+#define R300_GB_MSPOS1__MS_Y3_SHIFT 4
+#define R300_GB_MSPOS1__MS_X4_SHIFT 8
+#define R300_GB_MSPOS1__MS_Y4_SHIFT 12
+#define R300_GB_MSPOS1__MS_X5_SHIFT 16
+#define R300_GB_MSPOS1__MS_Y5_SHIFT 20
+#define R300_GB_MSPOS1__MSBD1 24
+
+
+#define R300_GB_TILE_CONFIG 0x4018
+#define R300_GB_TILE_ENABLE (1<<0)
+#define R300_GB_TILE_PIPE_COUNT_RV300 0
+#define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
+#define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
+#define R300_GB_TILE_SIZE_8 0
+#define R300_GB_TILE_SIZE_16 (1<<4)
+#define R300_GB_TILE_SIZE_32 (2<<4)
+#define R300_GB_SUPER_SIZE_1 (0<<6)
+#define R300_GB_SUPER_SIZE_2 (1<<6)
+#define R300_GB_SUPER_SIZE_4 (2<<6)
+#define R300_GB_SUPER_SIZE_8 (3<<6)
+#define R300_GB_SUPER_SIZE_16 (4<<6)
+#define R300_GB_SUPER_SIZE_32 (5<<6)
+#define R300_GB_SUPER_SIZE_64 (6<<6)
+#define R300_GB_SUPER_SIZE_128 (7<<6)
+#define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
+#define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
+#define R300_GB_SUPER_TILE_A 0
+#define R300_GB_SUPER_TILE_B (1<<15)
+#define R300_GB_SUBPIXEL_1_12 0
+#define R300_GB_SUBPIXEL_1_16 (1<<16)
+
+#define R300_GB_FIFO_SIZE 0x4024
+ /* each of the following is 2 bits wide */
+#define R300_GB_FIFO_SIZE_32 0
+#define R300_GB_FIFO_SIZE_64 1
+#define R300_GB_FIFO_SIZE_128 2
+#define R300_GB_FIFO_SIZE_256 3
+#define R300_SC_IFIFO_SIZE_SHIFT 0
+#define R300_SC_TZFIFO_SIZE_SHIFT 2
+#define R300_SC_BFIFO_SIZE_SHIFT 4
+
+#define R300_US_OFIFO_SIZE_SHIFT 12
+#define R300_US_WFIFO_SIZE_SHIFT 14
+ /*
+ * The following use the same constants as above, but the meaning
+ * is times 2 (i.e. instead of 32 words it means 64).
+ */
+#define R300_RS_TFIFO_SIZE_SHIFT 6
+#define R300_RS_CFIFO_SIZE_SHIFT 8
+#define R300_US_RAM_SIZE_SHIFT 10
+ /* watermarks, 3 bits wide */
+#define R300_RS_HIGHWATER_COL_SHIFT 16
+#define R300_RS_HIGHWATER_TEX_SHIFT 19
+#define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
+#define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
+
+#define R300_GB_SELECT 0x401C
+#define R300_GB_FOG_SELECT_C0A 0
+#define R300_GB_FOG_SELECT_C1A 1
+#define R300_GB_FOG_SELECT_C2A 2
+#define R300_GB_FOG_SELECT_C3A 3
+#define R300_GB_FOG_SELECT_1_1_W 4
+#define R300_GB_FOG_SELECT_Z 5
+#define R300_GB_DEPTH_SELECT_Z 0
+#define R300_GB_DEPTH_SELECT_1_1_W (1<<3)
+#define R300_GB_W_SELECT_1_W 0
+#define R300_GB_W_SELECT_1 (1<<4)
+
+#define R300_GB_AA_CONFIG 0x4020
+#define R300_AA_ENABLE 0x01
+#define R300_AA_SUBSAMPLES_2 0
+#define R300_AA_SUBSAMPLES_3 (1<<1)
+#define R300_AA_SUBSAMPLES_4 (2<<1)
+#define R300_AA_SUBSAMPLES_6 (3<<1)
+
+/* END */
+
+/* gap */
+/* Zero to flush caches. */
+#define R300_TX_CNTL 0x4100
+
+/* The upper enable bits are guessed, based on fglrx reported limits. */
+#define R300_TX_ENABLE 0x4104
+#define R300_TX_ENABLE_0 (1 << 0)
+#define R300_TX_ENABLE_1 (1 << 1)
+#define R300_TX_ENABLE_2 (1 << 2)
+#define R300_TX_ENABLE_3 (1 << 3)
+#define R300_TX_ENABLE_4 (1 << 4)
+#define R300_TX_ENABLE_5 (1 << 5)
+#define R300_TX_ENABLE_6 (1 << 6)
+#define R300_TX_ENABLE_7 (1 << 7)
+#define R300_TX_ENABLE_8 (1 << 8)
+#define R300_TX_ENABLE_9 (1 << 9)
+#define R300_TX_ENABLE_10 (1 << 10)
+#define R300_TX_ENABLE_11 (1 << 11)
+#define R300_TX_ENABLE_12 (1 << 12)
+#define R300_TX_ENABLE_13 (1 << 13)
+#define R300_TX_ENABLE_14 (1 << 14)
+#define R300_TX_ENABLE_15 (1 << 15)
+
+/*
+ * The pointsize is given in multiples of 6. The pointsize can be
+ * enormous: Clear() renders a single point that fills the entire
+ * framebuffer.
+ */
+#define R300_RE_POINTSIZE 0x421C
+#define R300_POINTSIZE_Y_SHIFT 0
+#define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
+#define R300_POINTSIZE_X_SHIFT 16
+#define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
+#define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
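+/*
+ * Added illustration: given the multiples-of-6 encoding above, a
+ * 10-pixel point would be programmed as
+ * ((10 * 6) << R300_POINTSIZE_X_SHIFT) |
+ * ((10 * 6) << R300_POINTSIZE_Y_SHIFT)
+ */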
+
+/*
+ * The line width is given in multiples of 6.
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
+#define R300_RE_LINE_CNT 0x4234
+#define R300_LINESIZE_SHIFT 0
+#define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
+#define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
+#define R300_LINE_CNT_HO (1 << 16)
+#define R300_LINE_CNT_VE (1 << 17)
+
+/* Some sort of scale or clamp value for texcoordless textures. */
+#define R300_RE_UNK4238 0x4238
+
+#define R300_RE_SHADE_MODEL 0x4278
+#define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
+#define R300_RE_SHADE_MODEL_FLAT 0x39595
+
+/* Dangerous */
+#define R300_RE_POLYGON_MODE 0x4288
+#define R300_PM_ENABLED (1 << 0)
+#define R300_PM_FRONT_POINT (0 << 0)
+#define R300_PM_BACK_POINT (0 << 0)
+#define R300_PM_FRONT_LINE (1 << 4)
+#define R300_PM_FRONT_FILL (1 << 5)
+#define R300_PM_BACK_LINE (1 << 7)
+#define R300_PM_BACK_FILL (1 << 8)
+
+/*
+ * Not sure why there are duplicates of the factor and constant values.
+ * My best guess so far is that there are separate zbiases for test
+ * and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation
+ * of zbias via pixel shaders.
+ */
+#define R300_RE_ZBIAS_T_FACTOR 0x42A4
+#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
+#define R300_RE_ZBIAS_W_FACTOR 0x42AC
+#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
+
+/*
+ * This register needs to be set to (1<<1) for RV350 to correctly
+ * perform depth test (see --vb-triangles in r300_demo).
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias
+ * primitive (FILL, LINE, POINT):
+ * one to enable depth test and one for depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
+#define R300_RE_OCCLUSION_CNTL 0x42B4
+#define R300_OCCLUSION_ON (1<<1)
+
+#define R300_RE_CULL_CNTL 0x42B8
+#define R300_CULL_FRONT (1 << 0)
+#define R300_CULL_BACK (1 << 1)
+#define R300_FRONT_FACE_CCW (0 << 2)
+#define R300_FRONT_FACE_CW (1 << 2)
+
+
+/*
+ * BEGIN: Rasterization / Interpolators - many guesses
+ * 0_UNKNOWN_18 has always been set except for clear operations.
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it
+ * depends on the vertex program, *not* the fragment program).
+ */
+#define R300_RS_CNTL_0 0x4300
+#define R300_RS_CNTL_TC_CNT_SHIFT 2
+#define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
+#define R300_RS_CNTL_CI_CNT_SHIFT 7
+ /* number of color interpolators used */
+#define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
+/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */
+#define R300_RS_CNTL_1 0x4304
+
+/* gap */
+/*
+ * Only used for texture coordinates.
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0. Set INTERP_USED on all interpolators that
+ * produce data used by the fragment program. INTERP_USED looks
+ * like a swizzling mask, but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
+#define R300_RS_INTERP_0 0x4310
+#define R300_RS_INTERP_1 0x4314
+#define R300_RS_INTERP_1_UNKNOWN 0x40
+#define R300_RS_INTERP_2 0x4318
+#define R300_RS_INTERP_2_UNKNOWN 0x80
+#define R300_RS_INTERP_3 0x431C
+#define R300_RS_INTERP_3_UNKNOWN 0xC0
+#define R300_RS_INTERP_4 0x4320
+#define R300_RS_INTERP_5 0x4324
+#define R300_RS_INTERP_6 0x4328
+#define R300_RS_INTERP_7 0x432C
+#define R300_RS_INTERP_SRC_SHIFT 2
+#define R300_RS_INTERP_SRC_MASK (7 << 2)
+#define R300_RS_INTERP_USED 0x00D10000
+
+/*
+ * These DWORDs control how vertex data is routed into fragment program
+ * registers, after interpolators.
+ */
+#define R300_RS_ROUTE_0 0x4330
+#define R300_RS_ROUTE_1 0x4334
+#define R300_RS_ROUTE_2 0x4338
+#define R300_RS_ROUTE_3 0x433C /* GUESS */
+#define R300_RS_ROUTE_4 0x4340 /* GUESS */
+#define R300_RS_ROUTE_5 0x4344 /* GUESS */
+#define R300_RS_ROUTE_6 0x4348 /* GUESS */
+#define R300_RS_ROUTE_7 0x434C /* GUESS */
+#define R300_RS_ROUTE_SOURCE_INTERP_0 0
+#define R300_RS_ROUTE_SOURCE_INTERP_1 1
+#define R300_RS_ROUTE_SOURCE_INTERP_2 2
+#define R300_RS_ROUTE_SOURCE_INTERP_3 3
+#define R300_RS_ROUTE_SOURCE_INTERP_4 4
+#define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
+#define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
+#define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
+#define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
+#define R300_RS_ROUTE_DEST_SHIFT 6
+#define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
+
+/*
+ * Special handling for color: When the fragment program uses color,
+ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+ * color register index.
+ */
+#define R300_RS_ROUTE_0_COLOR (1 << 14)
+#define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
+#define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
+/* As above, but for secondary color */
+#define R300_RS_ROUTE_1_COLOR1 (1 << 14)
+#define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
+#define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
+#define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
+/* END */
+
+/*
+ * BEGIN: Scissors and cliprects
+ * There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number between 0 and 15 by setting bits 0-3
+ * depending on whether the pixel is inside cliprects 0-3, respectively. For
+ * example, if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it
+ * is assigned the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+ * the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn (coordinates are inclusive).
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
+#define R300_RE_CLIPRECT_TL_0 0x43B0
+#define R300_RE_CLIPRECT_BR_0 0x43B4
+#define R300_RE_CLIPRECT_TL_1 0x43B8
+#define R300_RE_CLIPRECT_BR_1 0x43BC
+#define R300_RE_CLIPRECT_TL_2 0x43C0
+#define R300_RE_CLIPRECT_BR_2 0x43C4
+#define R300_RE_CLIPRECT_TL_3 0x43C8
+#define R300_RE_CLIPRECT_BR_3 0x43CC
+#define R300_CLIPRECT_OFFSET 1440
+#define R300_CLIPRECT_MASK 0x1FFF
+#define R300_CLIPRECT_X_SHIFT 0
+#define R300_CLIPRECT_X_MASK (0x1FFF << 0)
+#define R300_CLIPRECT_Y_SHIFT 13
+#define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
+#define R300_RE_CLIPRECT_CNTL 0x43D0
+#define R300_CLIP_OUT (1 << 0)
+#define R300_CLIP_0 (1 << 1)
+#define R300_CLIP_1 (1 << 2)
+#define R300_CLIP_10 (1 << 3)
+#define R300_CLIP_2 (1 << 4)
+#define R300_CLIP_20 (1 << 5)
+#define R300_CLIP_21 (1 << 6)
+#define R300_CLIP_210 (1 << 7)
+#define R300_CLIP_3 (1 << 8)
+#define R300_CLIP_30 (1 << 9)
+#define R300_CLIP_31 (1 << 10)
+#define R300_CLIP_310 (1 << 11)
+#define R300_CLIP_32 (1 << 12)
+#define R300_CLIP_320 (1 << 13)
+#define R300_CLIP_321 (1 << 14)
+#define R300_CLIP_3210 (1 << 15)
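+
+/*
+ * Added illustration: to rasterize only pixels that fall inside
+ * cliprect 0 (regardless of the other three), set every CNTL bit whose
+ * pixel number has bit 0 set:
+ *
+ * R300_CLIP_0 | R300_CLIP_10 | R300_CLIP_20 | R300_CLIP_210 |
+ * R300_CLIP_30 | R300_CLIP_310 | R300_CLIP_320 | R300_CLIP_3210
+ */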
+
+/* gap */
+#define R300_RE_SCISSORS_TL 0x43E0
+#define R300_RE_SCISSORS_BR 0x43E4
+#define R300_SCISSORS_OFFSET 1440
+#define R300_SCISSORS_X_SHIFT 0
+#define R300_SCISSORS_X_MASK (0x1FFF << 0)
+#define R300_SCISSORS_Y_SHIFT 13
+#define R300_SCISSORS_Y_MASK (0x1FFF << 13)
+/* END */
+
+/*
+ * BEGIN: Texture specification
+ * The texture specification dwords are grouped by meaning and not
+ * by texture unit. This means that e.g. the offset for texture
+ * image unit N is found in register TX_OFFSET_0 + (4*N).
+ */
+#define R300_TX_FILTER_0 0x4400
+#define R300_TX_REPEAT 0
+#define R300_TX_MIRRORED 1
+#define R300_TX_CLAMP 4
+#define R300_TX_CLAMP_TO_EDGE 2
+#define R300_TX_CLAMP_TO_BORDER 6
+#define R300_TX_WRAP_S_SHIFT 0
+#define R300_TX_WRAP_S_MASK (7 << 0)
+#define R300_TX_WRAP_T_SHIFT 3
+#define R300_TX_WRAP_T_MASK (7 << 3)
+#define R300_TX_WRAP_Q_SHIFT 6
+#define R300_TX_WRAP_Q_MASK (7 << 6)
+#define R300_TX_MAG_FILTER_NEAREST (1 << 9)
+#define R300_TX_MAG_FILTER_LINEAR (2 << 9)
+#define R300_TX_MAG_FILTER_MASK (3 << 9)
+#define R300_TX_MIN_FILTER_NEAREST (1 << 11)
+#define R300_TX_MIN_FILTER_LINEAR (2 << 11)
+#define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11)
+#define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11)
+#define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
+#define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
+
+/*
+ * NOTE: NEAREST doesn't seem to exist.
+ * I'm not setting MAG_FILTER_MASK and (3 << 11) for all
+ * anisotropy modes because that would void the selected mag filter.
+ */
+#define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13)
+#define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13)
+#define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
+#define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13)
+#define R300_TX_MIN_FILTER_MASK ((15 << 11) | (3 << 13))
+#define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
+#define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
+#define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
+#define R300_TX_MAX_ANISO_8_TO_1 (6 << 21)
+#define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
+#define R300_TX_MAX_ANISO_MASK (14 << 21)
+
+#define R300_TX_FILTER1_0 0x4440
+#define R300_CHROMA_KEY_MODE_DISABLE 0
+#define R300_CHROMA_KEY_FORCE 1
+#define R300_CHROMA_KEY_BLEND 2
+#define R300_MC_ROUND_NORMAL (0<<2)
+#define R300_MC_ROUND_MPEG4 (1<<2)
+#define R300_LOD_BIAS_MASK 0x1fff
+#define R300_EDGE_ANISO_EDGE_DIAG (0<<13)
+#define R300_EDGE_ANISO_EDGE_ONLY (1<<13)
+#define R300_MC_COORD_TRUNCATE_DISABLE (0<<14)
+#define R300_MC_COORD_TRUNCATE_MPEG (1<<14)
+#define R300_TX_TRI_PERF_0_8 (0<<15)
+#define R300_TX_TRI_PERF_1_8 (1<<15)
+#define R300_TX_TRI_PERF_1_4 (2<<15)
+#define R300_TX_TRI_PERF_3_8 (3<<15)
+#define R300_ANISO_THRESHOLD_MASK (7<<17)
+
+#define R300_TX_SIZE_0 0x4480
+#define R300_TX_WIDTHMASK_SHIFT 0
+#define R300_TX_WIDTHMASK_MASK (2047 << 0)
+#define R300_TX_HEIGHTMASK_SHIFT 11
+#define R300_TX_HEIGHTMASK_MASK (2047 << 11)
+#define R300_TX_UNK23 (1 << 23)
+#define R300_TX_SIZE_SHIFT 26 /* largest of width, height */
+#define R300_TX_SIZE_MASK (15 << 26)
+#define R300_TX_SIZE_PROJECTED (1<<30)
+#define R300_TX_SIZE_TXPITCH_EN (1<<31)
+#define R300_TX_FORMAT_0 0x44C0
+ /* The interpretation of the format word by Wladimir van der Laan */
+ /*
+ * The X, Y, Z and W refer to the layout of the components.
+ * They are given meanings as R, G, B and Alpha by the swizzle
+ * specification.
+ */
+#define R300_TX_FORMAT_X8 0x0
+#define R300_TX_FORMAT_X16 0x1
+#define R300_TX_FORMAT_Y4X4 0x2
+#define R300_TX_FORMAT_Y8X8 0x3
+#define R300_TX_FORMAT_Y16X16 0x4
+#define R300_TX_FORMAT_Z3Y3X2 0x5
+#define R300_TX_FORMAT_Z5Y6X5 0x6
+#define R300_TX_FORMAT_Z6Y5X5 0x7
+#define R300_TX_FORMAT_Z11Y11X10 0x8
+#define R300_TX_FORMAT_Z10Y11X11 0x9
+#define R300_TX_FORMAT_W4Z4Y4X4 0xA
+#define R300_TX_FORMAT_W1Z5Y5X5 0xB
+#define R300_TX_FORMAT_W8Z8Y8X8 0xC
+#define R300_TX_FORMAT_W2Z10Y10X10 0xD
+#define R300_TX_FORMAT_W16Z16Y16X16 0xE
+#define R300_TX_FORMAT_DXT1 0xF
+#define R300_TX_FORMAT_DXT3 0x10
+#define R300_TX_FORMAT_DXT5 0x11
+#define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
+#define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
+#define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
+#define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
+ /* 0x16 - some 16 bit green format.. ?? */
+#define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
+#define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
+
+ /* gap */
+ /* Floating point formats */
+ /* Note - hardware supports both 16 and 32 bit floating point */
+#define R300_TX_FORMAT_FL_I16 0x18
+#define R300_TX_FORMAT_FL_I16A16 0x19
+#define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
+#define R300_TX_FORMAT_FL_I32 0x1B
+#define R300_TX_FORMAT_FL_I32A32 0x1C
+#define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
+ /* alpha modes, convenience mostly */
+ /*
+ * If you have alpha, pick the constant appropriate to the
+ * number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.).
+ */
+#define R300_TX_FORMAT_ALPHA_1CH 0x000
+#define R300_TX_FORMAT_ALPHA_2CH 0x200
+#define R300_TX_FORMAT_ALPHA_4CH 0x600
+#define R300_TX_FORMAT_ALPHA_NONE 0xA00
+ /* Swizzling */
+ /* constants */
+#define R300_TX_FORMAT_X 0
+#define R300_TX_FORMAT_Y 1
+#define R300_TX_FORMAT_Z 2
+#define R300_TX_FORMAT_W 3
+#define R300_TX_FORMAT_ZERO 4
+#define R300_TX_FORMAT_ONE 5
+#define R300_TX_FORMAT_CUT_Z 6
+ /* 2.0*Z, everything above 1.0 is set to 0.0 */
+#define R300_TX_FORMAT_CUT_W 7
+ /* 2.0*W, everything above 1.0 is set to 0.0 */
+
+#define R300_TX_FORMAT_B_SHIFT 18
+#define R300_TX_FORMAT_G_SHIFT 15
+#define R300_TX_FORMAT_R_SHIFT 12
+#define R300_TX_FORMAT_A_SHIFT 9
+ /* Convenience macro to take care of layout and swizzling */
+#define R300_EASY_TX_FORMAT(B, G, R, A, FMT) (\
+ ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
+ | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
+ | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
+ | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
+ | (R300_TX_FORMAT_##FMT))
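+
+/*
+ * Added illustration: for example,
+ * R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8)
+ * expands to
+ * (0 << 18) | (1 << 15) | (2 << 12) | (3 << 9) | 0xC
+ * i.e. an identity-swizzled four-component 8888 format.
+ */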
+ /* These can be ORed with result of R300_EASY_TX_FORMAT() */
+/* We don't really know what they do. Take values from a constant color ? */
+#define R300_TX_FORMAT_CONST_X (1<<5)
+#define R300_TX_FORMAT_CONST_Y (2<<5)
+#define R300_TX_FORMAT_CONST_Z (4<<5)
+#define R300_TX_FORMAT_CONST_W (8<<5)
+
+#define R300_TX_FORMAT_YUV_MODE 0x00800000
+
+#define R300_TX_PITCH_0 0x4500
+ /* obvious missing in gap */
+#define R300_TX_OFFSET_0 0x4540
+/* BEGIN: Guess from R200 */
+#define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
+#define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
+#define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
+#define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+#define R300_TXO_MACRO_TILE (1 << 2)
+#define R300_TXO_MICRO_TILE (1 << 3)
+#define R300_TXO_OFFSET_MASK 0xffffffe0
+#define R300_TXO_OFFSET_SHIFT 5
+/* END */
+#define R300_TX_CHROMA_KEY_0 0x4580
+ /* 32 bit chroma key */
+#define R300_TX_BORDER_COLOR_0 0x45C0
+ /* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+
+/* END */
+
+/*
+ * BEGIN: Fragment program instruction set
+ * Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops; all subsequent nodes must have at
+ * least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total number of instructions is defined in PFS_CNTL_2. The offsets
+ * are offsets into the respective instruction streams, while *_END points
+ * to the last instruction relative to this offset.
+ */
+#define R300_PFS_CNTL_0 0x4600
+#define R300_PFS_CNTL_LAST_NODES_SHIFT 0
+#define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
+#define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
+#define R300_PFS_CNTL_1 0x4604
+/*
+ * There is an unshifted value here which has so far always been equal to
+ * the index of the highest used temporary register.
+ */
+#define R300_PFS_CNTL_2 0x4608
+#define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
+#define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
+#define R300_PFS_CNTL_ALU_END_SHIFT 6
+#define R300_PFS_CNTL_ALU_END_MASK (63 << 6)
+#define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12
+#define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
+#define R300_PFS_CNTL_TEX_END_SHIFT 18
+#define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */
+
+/* gap */
+/*
+ * Nodes are stored backwards. The last active node is always stored in
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ * LAST_NODE is set for the last node, and only for the last node.
+ */
+#define R300_PFS_NODE_0 0x4610
+#define R300_PFS_NODE_1 0x4614
+#define R300_PFS_NODE_2 0x4618
+#define R300_PFS_NODE_3 0x461C
+#define R300_PFS_NODE_ALU_OFFSET_SHIFT 0
+#define R300_PFS_NODE_ALU_OFFSET_MASK (63 << 0)
+#define R300_PFS_NODE_ALU_END_SHIFT 6
+#define R300_PFS_NODE_ALU_END_MASK (63 << 6)
+#define R300_PFS_NODE_TEX_OFFSET_SHIFT 12
+#define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
+#define R300_PFS_NODE_TEX_END_SHIFT 17
+#define R300_PFS_NODE_TEX_END_MASK (31 << 17)
+/* #define R300_PFS_NODE_LAST_NODE (1 << 22) */
+#define R300_PFS_NODE_OUTPUT_COLOR (1 << 22)
+#define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23)
+
+/*
+ * TEX
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0.
+ */
+#define R300_PFS_TEXI_0 0x4620
+#define R300_FPITX_SRC_SHIFT 0
+#define R300_FPITX_SRC_MASK (31 << 0)
+#define R300_FPITX_SRC_CONST (1 << 5) /* GUESS */
+#define R300_FPITX_DST_SHIFT 6
+#define R300_FPITX_DST_MASK (31 << 6)
+#define R300_FPITX_IMAGE_SHIFT 11
+#define R300_FPITX_IMAGE_MASK (15 << 11)
+ /* GUESS based on layout and native limits */
+/*
+ * Unsure if these are opcodes, or some kind of bitfield, but this is how
+ * they were set when I checked
+ */
+#define R300_FPITX_OPCODE_SHIFT 15
+#define R300_FPITX_OP_TEX 1
+#define R300_FPITX_OP_KIL 2
+#define R300_FPITX_OP_TXP 3
+#define R300_FPITX_OP_TXB 4
+
+/*
+ * ALU
+ * The ALU instruction register blocks are enumerated according to the
+ * order in which fglrx writes them. I assume there is space for 64
+ * instructions, since each block has space for a maximum of 64 DWORDs,
+ * and this matches reported native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ * - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ * - DP4: Use OUTC_DP4, OUTA_DP4
+ * - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ * - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ * - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ * - FLR: use FRC+MAD
+ * - XPD: use MAD+MAD
+ * - SGE, SLT: use MAD+CMP
+ * - RSQ: use ABS modifier for argument
+ * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ *   (e.g. RCP) into the color register
+ * - apparently, there's no quick DST operation
+ * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * The first stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary. fglrx fills unused sources with "read constant 0".
+ * According to specs, you cannot select more than two different constants.
+ *
+ * The second stage selects the operands from the sources. This is defined
+ * in INSTR0 (color) and INSTR2 (alpha). You can also select the special
+ * constants zero and one.
+ * Swizzling and negation happen in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their source
+ * selection appears to be fully independent (the register storage is
+ * probably physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some
+ * interaction WRT swizzling. If, for example, you want to load an R
+ * component into an alpha operand, this R component is taken from a
+ * *color* source, not from an alpha source. The corresponding register
+ * doesn't even have to appear in the alpha sources list. (I hope this
+ * all makes sense to you.)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers
+ * (DSTC_OUTPUT_* /DSTA_OUTPUT).
+ * You can write to both at once, or not write at all (the same index
+ * must be used for both).
+ *
+ * Note: There is a special form for LRP
+ * - Argument order is the same as in ARB_fragment_program.
+ * - Operation is MAD
+ * - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ * - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD.
+ */
+#define R300_PFS_INSTR1_0 0x46C0
+#define R300_FPI1_SRC0C_SHIFT 0
+#define R300_FPI1_SRC0C_MASK (31 << 0)
+#define R300_FPI1_SRC0C_CONST (1 << 5)
+#define R300_FPI1_SRC1C_SHIFT 6
+#define R300_FPI1_SRC1C_MASK (31 << 6)
+#define R300_FPI1_SRC1C_CONST (1 << 11)
+#define R300_FPI1_SRC2C_SHIFT 12
+#define R300_FPI1_SRC2C_MASK (31 << 12)
+#define R300_FPI1_SRC2C_CONST (1 << 17)
+#define R300_FPI1_DSTC_SHIFT 18
+#define R300_FPI1_DSTC_MASK (31 << 18)
+#define R300_FPI1_DSTC_REG_MASK_SHIFT 23
+#define R300_FPI1_DSTC_REG_X (1 << 23)
+#define R300_FPI1_DSTC_REG_Y (1 << 24)
+#define R300_FPI1_DSTC_REG_Z (1 << 25)
+#define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26
+#define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
+#define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
+#define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
+
+#define R300_PFS_INSTR3_0 0x47C0
+#define R300_FPI3_SRC0A_SHIFT 0
+#define R300_FPI3_SRC0A_MASK (31 << 0)
+#define R300_FPI3_SRC0A_CONST (1 << 5)
+#define R300_FPI3_SRC1A_SHIFT 6
+#define R300_FPI3_SRC1A_MASK (31 << 6)
+#define R300_FPI3_SRC1A_CONST (1 << 11)
+#define R300_FPI3_SRC2A_SHIFT 12
+#define R300_FPI3_SRC2A_MASK (31 << 12)
+#define R300_FPI3_SRC2A_CONST (1 << 17)
+#define R300_FPI3_DSTA_SHIFT 18
+#define R300_FPI3_DSTA_MASK (31 << 18)
+#define R300_FPI3_DSTA_REG (1 << 23)
+#define R300_FPI3_DSTA_OUTPUT (1 << 24)
+#define R300_FPI3_DSTA_DEPTH (1 << 27)
+
+#define R300_PFS_INSTR0_0 0x48C0
+#define R300_FPI0_ARGC_SRC0C_XYZ 0
+#define R300_FPI0_ARGC_SRC0C_XXX 1
+#define R300_FPI0_ARGC_SRC0C_YYY 2
+#define R300_FPI0_ARGC_SRC0C_ZZZ 3
+#define R300_FPI0_ARGC_SRC1C_XYZ 4
+#define R300_FPI0_ARGC_SRC1C_XXX 5
+#define R300_FPI0_ARGC_SRC1C_YYY 6
+#define R300_FPI0_ARGC_SRC1C_ZZZ 7
+#define R300_FPI0_ARGC_SRC2C_XYZ 8
+#define R300_FPI0_ARGC_SRC2C_XXX 9
+#define R300_FPI0_ARGC_SRC2C_YYY 10
+#define R300_FPI0_ARGC_SRC2C_ZZZ 11
+#define R300_FPI0_ARGC_SRC0A 12
+#define R300_FPI0_ARGC_SRC1A 13
+#define R300_FPI0_ARGC_SRC2A 14
+#define R300_FPI0_ARGC_SRC1C_LRP 15
+#define R300_FPI0_ARGC_ZERO 20
+#define R300_FPI0_ARGC_ONE 21
+#define R300_FPI0_ARGC_HALF 22 /* GUESS */
+#define R300_FPI0_ARGC_SRC0C_YZX 23
+#define R300_FPI0_ARGC_SRC1C_YZX 24
+#define R300_FPI0_ARGC_SRC2C_YZX 25
+#define R300_FPI0_ARGC_SRC0C_ZXY 26
+#define R300_FPI0_ARGC_SRC1C_ZXY 27
+#define R300_FPI0_ARGC_SRC2C_ZXY 28
+#define R300_FPI0_ARGC_SRC0CA_WZY 29
+#define R300_FPI0_ARGC_SRC1CA_WZY 30
+#define R300_FPI0_ARGC_SRC2CA_WZY 31
+
+#define R300_FPI0_ARG0C_SHIFT 0
+#define R300_FPI0_ARG0C_MASK (31 << 0)
+#define R300_FPI0_ARG0C_NEG (1 << 5)
+#define R300_FPI0_ARG0C_ABS (1 << 6)
+#define R300_FPI0_ARG1C_SHIFT 7
+#define R300_FPI0_ARG1C_MASK (31 << 7)
+#define R300_FPI0_ARG1C_NEG (1 << 12)
+#define R300_FPI0_ARG1C_ABS (1 << 13)
+#define R300_FPI0_ARG2C_SHIFT 14
+#define R300_FPI0_ARG2C_MASK (31 << 14)
+#define R300_FPI0_ARG2C_NEG (1 << 19)
+#define R300_FPI0_ARG2C_ABS (1 << 20)
+#define R300_FPI0_SPECIAL_LRP (1 << 21)
+#define R300_FPI0_OUTC_MAD (0 << 23)
+#define R300_FPI0_OUTC_DP3 (1 << 23)
+#define R300_FPI0_OUTC_DP4 (2 << 23)
+#define R300_FPI0_OUTC_MIN (4 << 23)
+#define R300_FPI0_OUTC_MAX (5 << 23)
+#define R300_FPI0_OUTC_CMP (8 << 23)
+#define R300_FPI0_OUTC_FRC (9 << 23)
+#define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
+#define R300_FPI0_OUTC_SAT (1 << 30)
+#define R300_FPI0_INSERT_NOP (1 << 31)
+
+#define R300_PFS_INSTR2_0 0x49C0
+#define R300_FPI2_ARGA_SRC0C_X 0
+#define R300_FPI2_ARGA_SRC0C_Y 1
+#define R300_FPI2_ARGA_SRC0C_Z 2
+#define R300_FPI2_ARGA_SRC1C_X 3
+#define R300_FPI2_ARGA_SRC1C_Y 4
+#define R300_FPI2_ARGA_SRC1C_Z 5
+#define R300_FPI2_ARGA_SRC2C_X 6
+#define R300_FPI2_ARGA_SRC2C_Y 7
+#define R300_FPI2_ARGA_SRC2C_Z 8
+#define R300_FPI2_ARGA_SRC0A 9
+#define R300_FPI2_ARGA_SRC1A 10
+#define R300_FPI2_ARGA_SRC2A 11
+#define R300_FPI2_ARGA_SRC1A_LRP 15
+#define R300_FPI2_ARGA_ZERO 16
+#define R300_FPI2_ARGA_ONE 17
+#define R300_FPI2_ARGA_HALF 18 /* GUESS */
+
+#define R300_FPI2_ARG0A_SHIFT 0
+#define R300_FPI2_ARG0A_MASK (31 << 0)
+#define R300_FPI2_ARG0A_NEG (1 << 5)
+#define R300_FPI2_ARG0A_ABS (1 << 6) /* GUESS */
+#define R300_FPI2_ARG1A_SHIFT 7
+#define R300_FPI2_ARG1A_MASK (31 << 7)
+#define R300_FPI2_ARG1A_NEG (1 << 12)
+#define R300_FPI2_ARG1A_ABS (1 << 13) /* GUESS */
+#define R300_FPI2_ARG2A_SHIFT 14
+#define R300_FPI2_ARG2A_MASK (31 << 14)
+#define R300_FPI2_ARG2A_NEG (1 << 19)
+#define R300_FPI2_ARG2A_ABS (1 << 20) /* GUESS */
+#define R300_FPI2_SPECIAL_LRP (1 << 21)
+#define R300_FPI2_OUTA_MAD (0 << 23)
+#define R300_FPI2_OUTA_DP4 (1 << 23)
+#define R300_FPI2_OUTA_MIN (2 << 23)
+#define R300_FPI2_OUTA_MAX (3 << 23)
+#define R300_FPI2_OUTA_CMP (6 << 23)
+#define R300_FPI2_OUTA_FRC (7 << 23)
+#define R300_FPI2_OUTA_EX2 (8 << 23)
+#define R300_FPI2_OUTA_LG2 (9 << 23)
+#define R300_FPI2_OUTA_RCP (10 << 23)
+#define R300_FPI2_OUTA_RSQ (11 << 23)
+#define R300_FPI2_OUTA_SAT (1 << 30)
+#define R300_FPI2_UNKNOWN_31 (1 << 31)
+/* END */
+
+/* gap */
+#define R300_PP_ALPHA_TEST 0x4BD4
+#define R300_REF_ALPHA_MASK 0x000000ff
+#define R300_ALPHA_TEST_FAIL (0 << 8)
+#define R300_ALPHA_TEST_LESS (1 << 8)
+#define R300_ALPHA_TEST_LEQUAL (3 << 8)
+#define R300_ALPHA_TEST_EQUAL (2 << 8)
+#define R300_ALPHA_TEST_GEQUAL (6 << 8)
+#define R300_ALPHA_TEST_GREATER (4 << 8)
+#define R300_ALPHA_TEST_NEQUAL (5 << 8)
+#define R300_ALPHA_TEST_PASS (7 << 8)
+#define R300_ALPHA_TEST_OP_MASK (7 << 8)
+#define R300_ALPHA_TEST_ENABLE (1 << 11)
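+
+/*
+ * Added illustration (a sketch, assuming the 8-bit reference maps 0xff
+ * to 1.0): an alpha test of "greater than 0.5" would be programmed as
+ *
+ * 0x80 | R300_ALPHA_TEST_GREATER | R300_ALPHA_TEST_ENABLE
+ */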
+
+/* gap */
+/* Fragment program parameters in 7.16 floating point */
+#define R300_PFS_PARAM_0_X 0x4C00
+#define R300_PFS_PARAM_0_Y 0x4C04
+#define R300_PFS_PARAM_0_Z 0x4C08
+#define R300_PFS_PARAM_0_W 0x4C0C
+/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
+#define R300_PFS_PARAM_31_X 0x4DF0
+#define R300_PFS_PARAM_31_Y 0x4DF4
+#define R300_PFS_PARAM_31_Z 0x4DF8
+#define R300_PFS_PARAM_31_W 0x4DFC
+
+/*
+ * Notes:
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used
+ *   in the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and
+ *   ABLEND are set to the same function (both registers are always
+ *   set up completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
+#define R300_RB3D_CBLEND 0x4E04
+#define R300_RB3D_ABLEND 0x4E08
+/* the following only appear in CBLEND */
+#define R300_BLEND_ENABLE (1 << 0)
+#define R300_BLEND_UNKNOWN (3 << 1)
+#define R300_BLEND_NO_SEPARATE (1 << 3)
+/* the following are shared between CBLEND and ABLEND */
+#define R300_FCN_MASK (3 << 12)
+#define R300_COMB_FCN_ADD_CLAMP (0 << 12)
+#define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
+#define R300_COMB_FCN_SUB_CLAMP (2 << 12)
+#define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
+#define R300_SRC_BLEND_GL_ZERO (32 << 16)
+#define R300_SRC_BLEND_GL_ONE (33 << 16)
+#define R300_SRC_BLEND_GL_SRC_COLOR (34 << 16)
+#define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+#define R300_SRC_BLEND_GL_DST_COLOR (36 << 16)
+#define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+#define R300_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
+#define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+#define R300_SRC_BLEND_GL_DST_ALPHA (40 << 16)
+#define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+#define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
+#define R300_SRC_BLEND_MASK (63 << 16)
+#define R300_DST_BLEND_GL_ZERO (32 << 24)
+#define R300_DST_BLEND_GL_ONE (33 << 24)
+#define R300_DST_BLEND_GL_SRC_COLOR (34 << 24)
+#define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+#define R300_DST_BLEND_GL_DST_COLOR (36 << 24)
+#define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+#define R300_DST_BLEND_GL_SRC_ALPHA (38 << 24)
+#define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+#define R300_DST_BLEND_GL_DST_ALPHA (40 << 24)
+#define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+#define R300_DST_BLEND_MASK (63 << 24)
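+
+/*
+ * Illustrative example, not part of the original register list:
+ * classic GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blending, assuming
+ * the field layout above holds. Per the notes above, fglrx would
+ * write the same function to both CBLEND and ABLEND.
+ */
+#define R300_EXAMPLE_CBLEND_STD_ALPHA \
+ (R300_BLEND_ENABLE | R300_COMB_FCN_ADD_CLAMP | \
+ R300_SRC_BLEND_GL_SRC_ALPHA | \
+ R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA)
+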
+#define R300_RB3D_COLORMASK 0x4E0C
+#define R300_COLORMASK0_B (1 << 0)
+#define R300_COLORMASK0_G (1 << 1)
+#define R300_COLORMASK0_R (1 << 2)
+#define R300_COLORMASK0_A (1 << 3)
+
+/* gap */
+#define R300_RB3D_COLOROFFSET0 0x4E28
+#define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */
+/* gap */
+/*
+ * Bit 16: Larger tiles
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird tile-like mode, but some pixels duplicated?
+ */
+#define R300_RB3D_COLORPITCH0 0x4E38
+#define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
+#define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
+#define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
+#define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
+#define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
+#define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
+#define R300_COLOR_FORMAT_RGB565 (2 << 22)
+#define R300_COLOR_FORMAT_ARGB8888 (3 << 22)
+#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
+
+/* gap */
+/*
+ * Guess by Vladimir.
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
+#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C
+#define R300_RB3D_DSTCACHE_02 0x00000002
+#define R300_RB3D_DSTCACHE_0A 0x0000000A
+
+/* gap */
+/*
+ * There seems to be no "write only" setting, so use
+ * Z-test = ALWAYS for this. Bit (1<<8) is the "test"
+ * bit, so a plain write is 6. - vd
+ */
+#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00
+#define R300_RB3D_Z_DISABLED_1 0x00000010 /* GUESS */
+#define R300_RB3D_Z_DISABLED_2 0x00000014 /* GUESS */
+#define R300_RB3D_Z_TEST 0x00000012
+#define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
+#define R300_RB3D_Z_WRITE_ONLY 0x00000006
+#define R300_RB3D_STENCIL_ENABLE 0x00000001
+
+#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
+ /* functions */
+#define R300_ZS_NEVER 0
+#define R300_ZS_LESS 1
+#define R300_ZS_LEQUAL 2
+#define R300_ZS_EQUAL 3
+#define R300_ZS_GEQUAL 4
+#define R300_ZS_GREATER 5
+#define R300_ZS_NOTEQUAL 6
+#define R300_ZS_ALWAYS 7
+#define R300_ZS_MASK 7
+ /* operations */
+#define R300_ZS_KEEP 0
+#define R300_ZS_ZERO 1
+#define R300_ZS_REPLACE 2
+#define R300_ZS_INCR 3
+#define R300_ZS_DECR 4
+#define R300_ZS_INVERT 5
+#define R300_ZS_INCR_WRAP 6
+#define R300_ZS_DECR_WRAP 7
+
+/*
+ * front and back refer to operations done for front
+ * and back faces, i.e. separate stencil function support
+ */
+#define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0
+#define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3
+#define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6
+#define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9
+#define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12
+#define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15
+#define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18
+#define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21
+#define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24
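+
+/*
+ * Illustrative example, not part of the original register list:
+ * depth func LESS with stencil effectively disabled (ALWAYS plus
+ * KEEP on every stencil op), assuming the shift layout above holds.
+ */
+#define R300_EXAMPLE_ZS1_DEPTH_LESS \
+ ((R300_ZS_LESS << R300_RB3D_ZS1_DEPTH_FUNC_SHIFT) | \
+ (R300_ZS_ALWAYS << R300_RB3D_ZS1_FRONT_FUNC_SHIFT) | \
+ (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT) | \
+ (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT) | \
+ (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT) | \
+ (R300_ZS_ALWAYS << R300_RB3D_ZS1_BACK_FUNC_SHIFT) | \
+ (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT) | \
+ (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT) | \
+ (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT))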
+
+
+
+#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08
+#define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0
+#define R300_RB3D_ZS2_STENCIL_MASK 0xFF
+#define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8
+#define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16
+
+/* gap */
+
+#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10
+#define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
+#define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
+
+/* gap */
+#define R300_RB3D_DEPTHOFFSET 0x4F20
+#define R300_RB3D_DEPTHPITCH 0x4F24
+#define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
+#define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
+#define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
+#define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
+#define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
+#define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
+
+/*
+ * BEGIN: Vertex program instruction set
+ * Every instruction is four dwords long:
+ * DWORD 0: output and opcode
+ * DWORD 1: first argument
+ * DWORD 2: second argument
+ * DWORD 3: third argument
+ *
+ * Notes:
+ * - ABS r, a is implemented as MAX r, a, -a
+ * - MOV is implemented as ADD to zero
+ * - XPD is implemented as MUL + MAD
+ * - FLR is implemented as FRC + ADD
+ * - apparently, fglrx tries to schedule instructions so that there
+ * is at least one instruction between the write to a temporary
+ * and the first read from said temporary; however, violations
+ * of this scheduling are allowed
+ * - register indices seem to be unrelated to OpenGL aliasing to
+ * conventional state
+ * - only one attribute and one parameter can be loaded at a time;
+ * however, the same attribute/parameter can be used for more
+ * than one argument
+ * - the second software argument for POW is the third hardware
+ * argument (no idea why)
+ * - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+ *
+ * There is some magic surrounding LIT:
+ * The single argument is replicated across all three inputs, but swizzled:
+ * First argument: xyzy
+ * Second argument: xyzx
+ * Third argument: xyzw
+ * Whenever the result is used later in the fragment program, fglrx forces
+ * x and w to be 1.0 in the input selection; I don't know whether this is
+ * strictly necessary
+ */
+#define R300_VPI_OUT_OP_DOT (1 << 0)
+#define R300_VPI_OUT_OP_MUL (2 << 0)
+#define R300_VPI_OUT_OP_ADD (3 << 0)
+#define R300_VPI_OUT_OP_MAD (4 << 0)
+#define R300_VPI_OUT_OP_DST (5 << 0)
+#define R300_VPI_OUT_OP_FRC (6 << 0)
+#define R300_VPI_OUT_OP_MAX (7 << 0)
+#define R300_VPI_OUT_OP_MIN (8 << 0)
+#define R300_VPI_OUT_OP_SGE (9 << 0)
+#define R300_VPI_OUT_OP_SLT (10 << 0)
+#define R300_VPI_OUT_OP_UNK12 (12 << 0)
+ /*
+ * Used in GL_POINT_DISTANCE_ATTENUATION_ARB,
+ * vector(scalar, vector)
+ */
+#define R300_VPI_OUT_OP_EXP (65 << 0)
+#define R300_VPI_OUT_OP_LOG (66 << 0)
+#define R300_VPI_OUT_OP_UNK67 (67 << 0)
+ /* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_LIT (68 << 0)
+#define R300_VPI_OUT_OP_POW (69 << 0)
+#define R300_VPI_OUT_OP_RCP (70 << 0)
+#define R300_VPI_OUT_OP_RSQ (72 << 0)
+#define R300_VPI_OUT_OP_UNK73 (73 << 0)
+ /*
+ * Used in GL_POINT_DISTANCE_ATTENUATION_ARB,
+ * scalar(scalar)
+ */
+#define R300_VPI_OUT_OP_EX2 (75 << 0)
+#define R300_VPI_OUT_OP_LG2 (76 << 0)
+#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
+#define R300_VPI_OUT_OP_UNK129 (129 << 0)
+ /* all temps, vector(scalar, vector, vector) */
+
+#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
+#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
+
+#define R300_VPI_OUT_REG_INDEX_SHIFT 13
+#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13)
+ /* GUESS based on fglrx native limits */
+
+#define R300_VPI_OUT_WRITE_X (1 << 20)
+#define R300_VPI_OUT_WRITE_Y (1 << 21)
+#define R300_VPI_OUT_WRITE_Z (1 << 22)
+#define R300_VPI_OUT_WRITE_W (1 << 23)
+
+#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0)
+#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
+#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
+#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
+#define R300_VPI_IN_REG_CLASS_MASK (31 << 0) /* GUESS */
+
+#define R300_VPI_IN_REG_INDEX_SHIFT 5
+#define R300_VPI_IN_REG_INDEX_MASK (255 << 5)
+ /* GUESS based on fglrx native limits */
+
+/*
+ * The R300 can select components from the input register arbitrarily.
+ * Use the following constants, shifted by the component shift you
+ * want to select
+ */
+#define R300_VPI_IN_SELECT_X 0
+#define R300_VPI_IN_SELECT_Y 1
+#define R300_VPI_IN_SELECT_Z 2
+#define R300_VPI_IN_SELECT_W 3
+#define R300_VPI_IN_SELECT_ZERO 4
+#define R300_VPI_IN_SELECT_ONE 5
+#define R300_VPI_IN_SELECT_MASK 7
+
+#define R300_VPI_IN_X_SHIFT 13
+#define R300_VPI_IN_Y_SHIFT 16
+#define R300_VPI_IN_Z_SHIFT 19
+#define R300_VPI_IN_W_SHIFT 22
+
+#define R300_VPI_IN_NEG_X (1 << 25)
+#define R300_VPI_IN_NEG_Y (1 << 26)
+#define R300_VPI_IN_NEG_Z (1 << 27)
+#define R300_VPI_IN_NEG_W (1 << 28)
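+
+/*
+ * Illustrative example, not part of the original register list:
+ * "MOV t1, a0" encoded as ADD to zero per the notes above. DWORD 0
+ * selects the opcode, destination and write mask; DWORD 1 reads
+ * attribute 0 with the identity swizzle; DWORD 2 selects constant
+ * zero in all components (DWORD 3 is unused for ADD and can repeat
+ * DWORD 2). This is a sketch based on the layout guesses above.
+ */
+#define R300_EXAMPLE_VPI_MOV_DW0 \
+ (R300_VPI_OUT_OP_ADD | R300_VPI_OUT_REG_CLASS_TEMPORARY | \
+ (1 << R300_VPI_OUT_REG_INDEX_SHIFT) | \
+ R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y | \
+ R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W)
+#define R300_EXAMPLE_VPI_MOV_DW1 \
+ (R300_VPI_IN_REG_CLASS_ATTRIBUTE | \
+ (0 << R300_VPI_IN_REG_INDEX_SHIFT) | \
+ (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) | \
+ (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) | \
+ (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) | \
+ (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT))
+#define R300_EXAMPLE_VPI_MOV_DW2 \
+ (R300_VPI_IN_REG_CLASS_NONE | \
+ (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_X_SHIFT) | \
+ (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Y_SHIFT) | \
+ (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Z_SHIFT) | \
+ (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_W_SHIFT))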
+/* END */
+
+/* BEGIN: Packet 3 commands */
+
+/* A primitive emission dword. */
+#define R300_PRIM_TYPE_NONE (0 << 0)
+#define R300_PRIM_TYPE_POINT (1 << 0)
+#define R300_PRIM_TYPE_LINE (2 << 0)
+#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
+#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
+#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
+#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
+#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
+#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
+#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
+#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
+#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0)
+ /* GUESS (based on r200) */
+#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
+#define R300_PRIM_TYPE_QUADS (13 << 0)
+#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
+#define R300_PRIM_TYPE_POLYGON (15 << 0)
+#define R300_PRIM_TYPE_MASK 0xF
+#define R300_PRIM_WALK_IND (1 << 4)
+#define R300_PRIM_WALK_LIST (2 << 4)
+#define R300_PRIM_WALK_RING (3 << 4)
+#define R300_PRIM_WALK_MASK (3 << 4)
+#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6)
+ /* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) /* GUESS */
+#define R300_PRIM_NUM_VERTICES_SHIFT 16
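+
+/*
+ * Illustrative example, not part of the original register list: a
+ * primitive emission dword for an indexed triangle list of "nverts"
+ * vertices (nverts is a hypothetical parameter), assuming the field
+ * layout above holds.
+ */
+#define R300_EXAMPLE_PRIM_TRI_LIST_IND(nverts) \
+ (R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_IND | \
+ ((nverts) << R300_PRIM_NUM_VERTICES_SHIFT))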
+
+/*
+ * Draw a primitive from vertex data in arrays loaded via
+ * 3D_LOAD_VBPNTR. Two parameter dwords:
+ * 0. The first parameter appears to be always 0.
+ * 1. The second parameter is a standard primitive emission dword.
+ */
+#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
+
+/*
+ * Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable-length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less
+ * significant word refers to the first array, the more significant
+ * word to the second array in the block.
+ * The low byte of each word contains the size of an array entry in
+ * dwords; the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first
+ * array, the third dword the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ * (An illustrative packing macro follows the define below.)
+ */
+#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
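+
+/*
+ * Illustrative example, not part of the original register list:
+ * packing the first dword of a 3D_LOAD_VBPNTR block for two arrays,
+ * per the layout described above (entry sizes in dwords in the low
+ * byte, strides in the high byte of each word). Parameter names are
+ * hypothetical.
+ */
+#define R300_EXAMPLE_VBPNTR_DW0(size0, stride0, size1, stride1) \
+ (((size0) & 0xff) | (((stride0) & 0xff) << 8) | \
+ (((size1) & 0xff) << 16) | (((stride1) & 0xff) << 24))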
+
+#define R300_PACKET3_INDX_BUFFER 0x00003300
+#define R300_EB_UNK1_SHIFT 24
+#define R300_EB_UNK1 (0x80 << 24)
+#define R300_EB_UNK2 0x0810
+#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __R300_REG_H_ */
diff --git a/usr/src/uts/intel/io/drm/radeon_cp.c b/usr/src/uts/intel/io/drm/radeon_cp.c
new file mode 100644
index 0000000..0261249
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/radeon_cp.c
@@ -0,0 +1,2387 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "r300_reg.h"
+#include "radeon_io32.h"
+
+static int radeon_do_cleanup_cp(drm_device_t *dev);
+
+/* CP microcode (from ATI) */
+static const u32 R200_cp_microcode[][2] = {
+ {0x21007000, 0000000000},
+ {0x20007000, 0000000000},
+ {0x000000ab, 0x00000004},
+ {0x000000af, 0x00000004},
+ {0x66544a49, 0000000000},
+ {0x49494174, 0000000000},
+ {0x54517d83, 0000000000},
+ {0x498d8b64, 0000000000},
+ {0x49494949, 0000000000},
+ {0x49da493c, 0000000000},
+ {0x49989898, 0000000000},
+ {0xd34949d5, 0000000000},
+ {0x9dc90e11, 0000000000},
+ {0xce9b9b9b, 0000000000},
+ {0x000f0000, 0x00000016},
+ {0x352e232c, 0000000000},
+ {0x00000013, 0x00000004},
+ {0x000f0000, 0x00000016},
+ {0x352e272c, 0000000000},
+ {0x000f0001, 0x00000016},
+ {0x3239362f, 0000000000},
+ {0x000077ef, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x00000020, 0x0000001a},
+ {0x00004000, 0x0000001e},
+ {0x00061000, 0x00000002},
+ {0x00000020, 0x0000001a},
+ {0x00004000, 0x0000001e},
+ {0x00061000, 0x00000002},
+ {0x00000020, 0x0000001a},
+ {0x00004000, 0x0000001e},
+ {0x00000016, 0x00000004},
+ {0x0003802a, 0x00000002},
+ {0x040067e0, 0x00000002},
+ {0x00000016, 0x00000004},
+ {0x000077e0, 0x00000002},
+ {0x00065000, 0x00000002},
+ {0x000037e1, 0x00000002},
+ {0x040067e1, 0x00000006},
+ {0x000077e0, 0x00000002},
+ {0x000077e1, 0x00000002},
+ {0x000077e1, 0x00000006},
+ {0xffffffff, 0000000000},
+ {0x10000000, 0000000000},
+ {0x0003802a, 0x00000002},
+ {0x040067e0, 0x00000006},
+ {0x00007675, 0x00000002},
+ {0x00007676, 0x00000002},
+ {0x00007677, 0x00000002},
+ {0x00007678, 0x00000006},
+ {0x0003802b, 0x00000002},
+ {0x04002676, 0x00000002},
+ {0x00007677, 0x00000002},
+ {0x00007678, 0x00000006},
+ {0x0000002e, 0x00000018},
+ {0x0000002e, 0x00000018},
+ {0000000000, 0x00000006},
+ {0x0000002f, 0x00000018},
+ {0x0000002f, 0x00000018},
+ {0000000000, 0x00000006},
+ {0x01605000, 0x00000002},
+ {0x00065000, 0x00000002},
+ {0x00098000, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x64c0603d, 0x00000004},
+ {0x00080000, 0x00000016},
+ {0000000000, 0000000000},
+ {0x0400251d, 0x00000002},
+ {0x00007580, 0x00000002},
+ {0x00067581, 0x00000002},
+ {0x04002580, 0x00000002},
+ {0x00067581, 0x00000002},
+ {0x00000046, 0x00000004},
+ {0x00005000, 0000000000},
+ {0x00061000, 0x00000002},
+ {0x0000750e, 0x00000002},
+ {0x00019000, 0x00000002},
+ {0x00011055, 0x00000014},
+ {0x00000055, 0x00000012},
+ {0x0400250f, 0x00000002},
+ {0x0000504a, 0x00000004},
+ {0x00007565, 0x00000002},
+ {0x00007566, 0x00000002},
+ {0x00000051, 0x00000004},
+ {0x01e655b4, 0x00000002},
+ {0x4401b0dc, 0x00000002},
+ {0x01c110dc, 0x00000002},
+ {0x2666705d, 0x00000018},
+ {0x040c2565, 0x00000002},
+ {0x0000005d, 0x00000018},
+ {0x04002564, 0x00000002},
+ {0x00007566, 0x00000002},
+ {0x00000054, 0x00000004},
+ {0x00401060, 0x00000008},
+ {0x00101000, 0x00000002},
+ {0x000d80ff, 0x00000002},
+ {0x00800063, 0x00000008},
+ {0x000f9000, 0x00000002},
+ {0x000e00ff, 0x00000002},
+ {0000000000, 0x00000006},
+ {0x00000080, 0x00000018},
+ {0x00000054, 0x00000004},
+ {0x00007576, 0x00000002},
+ {0x00065000, 0x00000002},
+ {0x00009000, 0x00000002},
+ {0x00041000, 0x00000002},
+ {0x0c00350e, 0x00000002},
+ {0x00049000, 0x00000002},
+ {0x00051000, 0x00000002},
+ {0x01e785f8, 0x00000002},
+ {0x00200000, 0x00000002},
+ {0x00600073, 0x0000000c},
+ {0x00007563, 0x00000002},
+ {0x006075f0, 0x00000021},
+ {0x20007068, 0x00000004},
+ {0x00005068, 0x00000004},
+ {0x00007576, 0x00000002},
+ {0x00007577, 0x00000002},
+ {0x0000750e, 0x00000002},
+ {0x0000750f, 0x00000002},
+ {0x00a05000, 0x00000002},
+ {0x00600076, 0x0000000c},
+ {0x006075f0, 0x00000021},
+ {0x000075f8, 0x00000002},
+ {0x00000076, 0x00000004},
+ {0x000a750e, 0x00000002},
+ {0x0020750f, 0x00000002},
+ {0x00600079, 0x00000004},
+ {0x00007570, 0x00000002},
+ {0x00007571, 0x00000002},
+ {0x00007572, 0x00000006},
+ {0x00005000, 0x00000002},
+ {0x00a05000, 0x00000002},
+ {0x00007568, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x00000084, 0x0000000c},
+ {0x00058000, 0x00000002},
+ {0x0c607562, 0x00000002},
+ {0x00000086, 0x00000004},
+ {0x00600085, 0x00000004},
+ {0x400070dd, 0000000000},
+ {0x000380dd, 0x00000002},
+ {0x00000093, 0x0000001c},
+ {0x00065095, 0x00000018},
+ {0x040025bb, 0x00000002},
+ {0x00061096, 0x00000018},
+ {0x040075bc, 0000000000},
+ {0x000075bb, 0x00000002},
+ {0x000075bc, 0000000000},
+ {0x00090000, 0x00000006},
+ {0x00090000, 0x00000002},
+ {0x000d8002, 0x00000006},
+ {0x00005000, 0x00000002},
+ {0x00007821, 0x00000002},
+ {0x00007800, 0000000000},
+ {0x00007821, 0x00000002},
+ {0x00007800, 0000000000},
+ {0x01665000, 0x00000002},
+ {0x000a0000, 0x00000002},
+ {0x000671cc, 0x00000002},
+ {0x0286f1cd, 0x00000002},
+ {0x000000a3, 0x00000010},
+ {0x21007000, 0000000000},
+ {0x000000aa, 0x0000001c},
+ {0x00065000, 0x00000002},
+ {0x000a0000, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x000b0000, 0x00000002},
+ {0x38067000, 0x00000002},
+ {0x000a00a6, 0x00000004},
+ {0x20007000, 0000000000},
+ {0x01200000, 0x00000002},
+ {0x20077000, 0x00000002},
+ {0x01200000, 0x00000002},
+ {0x20007000, 0000000000},
+ {0x00061000, 0x00000002},
+ {0x0120751b, 0x00000002},
+ {0x8040750a, 0x00000002},
+ {0x8040750b, 0x00000002},
+ {0x00110000, 0x00000002},
+ {0x000380dd, 0x00000002},
+ {0x000000bd, 0x0000001c},
+ {0x00061096, 0x00000018},
+ {0x844075bd, 0x00000002},
+ {0x00061095, 0x00000018},
+ {0x840075bb, 0x00000002},
+ {0x00061096, 0x00000018},
+ {0x844075bc, 0x00000002},
+ {0x000000c0, 0x00000004},
+ {0x804075bd, 0x00000002},
+ {0x800075bb, 0x00000002},
+ {0x804075bc, 0x00000002},
+ {0x00108000, 0x00000002},
+ {0x01400000, 0x00000002},
+ {0x006000c4, 0x0000000c},
+ {0x20c07000, 0x00000020},
+ {0x000000c6, 0x00000012},
+ {0x00800000, 0x00000006},
+ {0x0080751d, 0x00000006},
+ {0x000025bb, 0x00000002},
+ {0x000040c0, 0x00000004},
+ {0x0000775c, 0x00000002},
+ {0x00a05000, 0x00000002},
+ {0x00661000, 0x00000002},
+ {0x0460275d, 0x00000020},
+ {0x00004000, 0000000000},
+ {0x00007999, 0x00000002},
+ {0x00a05000, 0x00000002},
+ {0x00661000, 0x00000002},
+ {0x0460299b, 0x00000020},
+ {0x00004000, 0000000000},
+ {0x01e00830, 0x00000002},
+ {0x21007000, 0000000000},
+ {0x00005000, 0x00000002},
+ {0x00038042, 0x00000002},
+ {0x040025e0, 0x00000002},
+ {0x000075e1, 0000000000},
+ {0x00000001, 0000000000},
+ {0x000380d9, 0x00000002},
+ {0x04007394, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+};
+
+static const u32 radeon_cp_microcode[][2] = {
+ {0x21007000, 0000000000},
+ {0x20007000, 0000000000},
+ {0x000000b4, 0x00000004},
+ {0x000000b8, 0x00000004},
+ {0x6f5b4d4c, 0000000000},
+ {0x4c4c427f, 0000000000},
+ {0x5b568a92, 0000000000},
+ {0x4ca09c6d, 0000000000},
+ {0xad4c4c4c, 0000000000},
+ {0x4ce1af3d, 0000000000},
+ {0xd8afafaf, 0000000000},
+ {0xd64c4cdc, 0000000000},
+ {0x4cd10d10, 0000000000},
+ {0x000f0000, 0x00000016},
+ {0x362f242d, 0000000000},
+ {0x00000012, 0x00000004},
+ {0x000f0000, 0x00000016},
+ {0x362f282d, 0000000000},
+ {0x000380e7, 0x00000002},
+ {0x04002c97, 0x00000002},
+ {0x000f0001, 0x00000016},
+ {0x333a3730, 0000000000},
+ {0x000077ef, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x00000021, 0x0000001a},
+ {0x00004000, 0x0000001e},
+ {0x00061000, 0x00000002},
+ {0x00000021, 0x0000001a},
+ {0x00004000, 0x0000001e},
+ {0x00061000, 0x00000002},
+ {0x00000021, 0x0000001a},
+ {0x00004000, 0x0000001e},
+ {0x00000017, 0x00000004},
+ {0x0003802b, 0x00000002},
+ {0x040067e0, 0x00000002},
+ {0x00000017, 0x00000004},
+ {0x000077e0, 0x00000002},
+ {0x00065000, 0x00000002},
+ {0x000037e1, 0x00000002},
+ {0x040067e1, 0x00000006},
+ {0x000077e0, 0x00000002},
+ {0x000077e1, 0x00000002},
+ {0x000077e1, 0x00000006},
+ {0xffffffff, 0000000000},
+ {0x10000000, 0000000000},
+ {0x0003802b, 0x00000002},
+ {0x040067e0, 0x00000006},
+ {0x00007675, 0x00000002},
+ {0x00007676, 0x00000002},
+ {0x00007677, 0x00000002},
+ {0x00007678, 0x00000006},
+ {0x0003802c, 0x00000002},
+ {0x04002676, 0x00000002},
+ {0x00007677, 0x00000002},
+ {0x00007678, 0x00000006},
+ {0x0000002f, 0x00000018},
+ {0x0000002f, 0x00000018},
+ {0000000000, 0x00000006},
+ {0x00000030, 0x00000018},
+ {0x00000030, 0x00000018},
+ {0000000000, 0x00000006},
+ {0x01605000, 0x00000002},
+ {0x00065000, 0x00000002},
+ {0x00098000, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x64c0603e, 0x00000004},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x00080000, 0x00000016},
+ {0000000000, 0000000000},
+ {0x0400251d, 0x00000002},
+ {0x00007580, 0x00000002},
+ {0x00067581, 0x00000002},
+ {0x04002580, 0x00000002},
+ {0x00067581, 0x00000002},
+ {0x00000049, 0x00000004},
+ {0x00005000, 0000000000},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x0000750e, 0x00000002},
+ {0x00019000, 0x00000002},
+ {0x00011055, 0x00000014},
+ {0x00000055, 0x00000012},
+ {0x0400250f, 0x00000002},
+ {0x0000504f, 0x00000004},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x00007565, 0x00000002},
+ {0x00007566, 0x00000002},
+ {0x00000058, 0x00000004},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x01e655b4, 0x00000002},
+ {0x4401b0e4, 0x00000002},
+ {0x01c110e4, 0x00000002},
+ {0x26667066, 0x00000018},
+ {0x040c2565, 0x00000002},
+ {0x00000066, 0x00000018},
+ {0x04002564, 0x00000002},
+ {0x00007566, 0x00000002},
+ {0x0000005d, 0x00000004},
+ {0x00401069, 0x00000008},
+ {0x00101000, 0x00000002},
+ {0x000d80ff, 0x00000002},
+ {0x0080006c, 0x00000008},
+ {0x000f9000, 0x00000002},
+ {0x000e00ff, 0x00000002},
+ {0000000000, 0x00000006},
+ {0x0000008f, 0x00000018},
+ {0x0000005b, 0x00000004},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x00007576, 0x00000002},
+ {0x00065000, 0x00000002},
+ {0x00009000, 0x00000002},
+ {0x00041000, 0x00000002},
+ {0x0c00350e, 0x00000002},
+ {0x00049000, 0x00000002},
+ {0x00051000, 0x00000002},
+ {0x01e785f8, 0x00000002},
+ {0x00200000, 0x00000002},
+ {0x0060007e, 0x0000000c},
+ {0x00007563, 0x00000002},
+ {0x006075f0, 0x00000021},
+ {0x20007073, 0x00000004},
+ {0x00005073, 0x00000004},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x00007576, 0x00000002},
+ {0x00007577, 0x00000002},
+ {0x0000750e, 0x00000002},
+ {0x0000750f, 0x00000002},
+ {0x00a05000, 0x00000002},
+ {0x00600083, 0x0000000c},
+ {0x006075f0, 0x00000021},
+ {0x000075f8, 0x00000002},
+ {0x00000083, 0x00000004},
+ {0x000a750e, 0x00000002},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x0020750f, 0x00000002},
+ {0x00600086, 0x00000004},
+ {0x00007570, 0x00000002},
+ {0x00007571, 0x00000002},
+ {0x00007572, 0x00000006},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x00005000, 0x00000002},
+ {0x00a05000, 0x00000002},
+ {0x00007568, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x00000095, 0x0000000c},
+ {0x00058000, 0x00000002},
+ {0x0c607562, 0x00000002},
+ {0x00000097, 0x00000004},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x00600096, 0x00000004},
+ {0x400070e5, 0000000000},
+ {0x000380e6, 0x00000002},
+ {0x040025c5, 0x00000002},
+ {0x000380e5, 0x00000002},
+ {0x000000a8, 0x0000001c},
+ {0x000650aa, 0x00000018},
+ {0x040025bb, 0x00000002},
+ {0x000610ab, 0x00000018},
+ {0x040075bc, 0000000000},
+ {0x000075bb, 0x00000002},
+ {0x000075bc, 0000000000},
+ {0x00090000, 0x00000006},
+ {0x00090000, 0x00000002},
+ {0x000d8002, 0x00000006},
+ {0x00007832, 0x00000002},
+ {0x00005000, 0x00000002},
+ {0x000380e7, 0x00000002},
+ {0x04002c97, 0x00000002},
+ {0x00007820, 0x00000002},
+ {0x00007821, 0x00000002},
+ {0x00007800, 0000000000},
+ {0x01200000, 0x00000002},
+ {0x20077000, 0x00000002},
+ {0x01200000, 0x00000002},
+ {0x20007000, 0x00000002},
+ {0x00061000, 0x00000002},
+ {0x0120751b, 0x00000002},
+ {0x8040750a, 0x00000002},
+ {0x8040750b, 0x00000002},
+ {0x00110000, 0x00000002},
+ {0x000380e5, 0x00000002},
+ {0x000000c6, 0x0000001c},
+ {0x000610ab, 0x00000018},
+ {0x844075bd, 0x00000002},
+ {0x000610aa, 0x00000018},
+ {0x840075bb, 0x00000002},
+ {0x000610ab, 0x00000018},
+ {0x844075bc, 0x00000002},
+ {0x000000c9, 0x00000004},
+ {0x804075bd, 0x00000002},
+ {0x800075bb, 0x00000002},
+ {0x804075bc, 0x00000002},
+ {0x00108000, 0x00000002},
+ {0x01400000, 0x00000002},
+ {0x006000cd, 0x0000000c},
+ {0x20c07000, 0x00000020},
+ {0x000000cf, 0x00000012},
+ {0x00800000, 0x00000006},
+ {0x0080751d, 0x00000006},
+ {0000000000, 0000000000},
+ {0x0000775c, 0x00000002},
+ {0x00a05000, 0x00000002},
+ {0x00661000, 0x00000002},
+ {0x0460275d, 0x00000020},
+ {0x00004000, 0000000000},
+ {0x01e00830, 0x00000002},
+ {0x21007000, 0000000000},
+ {0x6464614d, 0000000000},
+ {0x69687420, 0000000000},
+ {0x00000073, 0000000000},
+ {0000000000, 0000000000},
+ {0x00005000, 0x00000002},
+ {0x000380d0, 0x00000002},
+ {0x040025e0, 0x00000002},
+ {0x000075e1, 0000000000},
+ {0x00000001, 0000000000},
+ {0x000380e0, 0x00000002},
+ {0x04002394, 0x00000002},
+ {0x00005000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0x00000008, 0000000000},
+ {0x00000004, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+ {0000000000, 0000000000},
+};
+
+static const u32 R300_cp_microcode[][2] = {
+ { 0x4200e000, 0000000000 },
+ { 0x4000e000, 0000000000 },
+ { 0x000000af, 0x00000008 },
+ { 0x000000b3, 0x00000008 },
+ { 0x6c5a504f, 0000000000 },
+ { 0x4f4f497a, 0000000000 },
+ { 0x5a578288, 0000000000 },
+ { 0x4f91906a, 0000000000 },
+ { 0x4f4f4f4f, 0000000000 },
+ { 0x4fe24f44, 0000000000 },
+ { 0x4f9c9c9c, 0000000000 },
+ { 0xdc4f4fde, 0000000000 },
+ { 0xa1cd4f4f, 0000000000 },
+ { 0xd29d9d9d, 0000000000 },
+ { 0x4f0f9fd7, 0000000000 },
+ { 0x000ca000, 0x00000004 },
+ { 0x000d0012, 0x00000038 },
+ { 0x0000e8b4, 0x00000004 },
+ { 0x000d0014, 0x00000038 },
+ { 0x0000e8b6, 0x00000004 },
+ { 0x000d0016, 0x00000038 },
+ { 0x0000e854, 0x00000004 },
+ { 0x000d0018, 0x00000038 },
+ { 0x0000e855, 0x00000004 },
+ { 0x000d001a, 0x00000038 },
+ { 0x0000e856, 0x00000004 },
+ { 0x000d001c, 0x00000038 },
+ { 0x0000e857, 0x00000004 },
+ { 0x000d001e, 0x00000038 },
+ { 0x0000e824, 0x00000004 },
+ { 0x000d0020, 0x00000038 },
+ { 0x0000e825, 0x00000004 },
+ { 0x000d0022, 0x00000038 },
+ { 0x0000e830, 0x00000004 },
+ { 0x000d0024, 0x00000038 },
+ { 0x0000f0c0, 0x00000004 },
+ { 0x000d0026, 0x00000038 },
+ { 0x0000f0c1, 0x00000004 },
+ { 0x000d0028, 0x00000038 },
+ { 0x0000f041, 0x00000004 },
+ { 0x000d002a, 0x00000038 },
+ { 0x0000f184, 0x00000004 },
+ { 0x000d002c, 0x00000038 },
+ { 0x0000f185, 0x00000004 },
+ { 0x000d002e, 0x00000038 },
+ { 0x0000f186, 0x00000004 },
+ { 0x000d0030, 0x00000038 },
+ { 0x0000f187, 0x00000004 },
+ { 0x000d0032, 0x00000038 },
+ { 0x0000f180, 0x00000004 },
+ { 0x000d0034, 0x00000038 },
+ { 0x0000f393, 0x00000004 },
+ { 0x000d0036, 0x00000038 },
+ { 0x0000f38a, 0x00000004 },
+ { 0x000d0038, 0x00000038 },
+ { 0x0000f38e, 0x00000004 },
+ { 0x0000e821, 0x00000004 },
+ { 0x0140a000, 0x00000004 },
+ { 0x00000043, 0x00000018 },
+ { 0x00cce800, 0x00000004 },
+ { 0x001b0001, 0x00000004 },
+ { 0x08004800, 0x00000004 },
+ { 0x001b0001, 0x00000004 },
+ { 0x08004800, 0x00000004 },
+ { 0x001b0001, 0x00000004 },
+ { 0x08004800, 0x00000004 },
+ { 0x0000003a, 0x00000008 },
+ { 0x0000a000, 0000000000 },
+ { 0x02c0a000, 0x00000004 },
+ { 0x000ca000, 0x00000004 },
+ { 0x00130000, 0x00000004 },
+ { 0x000c2000, 0x00000004 },
+ { 0xc980c045, 0x00000008 },
+ { 0x2000451d, 0x00000004 },
+ { 0x0000e580, 0x00000004 },
+ { 0x000ce581, 0x00000004 },
+ { 0x08004580, 0x00000004 },
+ { 0x000ce581, 0x00000004 },
+ { 0x0000004c, 0x00000008 },
+ { 0x0000a000, 0000000000 },
+ { 0x000c2000, 0x00000004 },
+ { 0x0000e50e, 0x00000004 },
+ { 0x00032000, 0x00000004 },
+ { 0x00022056, 0x00000028 },
+ { 0x00000056, 0x00000024 },
+ { 0x0800450f, 0x00000004 },
+ { 0x0000a050, 0x00000008 },
+ { 0x0000e565, 0x00000004 },
+ { 0x0000e566, 0x00000004 },
+ { 0x00000057, 0x00000008 },
+ { 0x03cca5b4, 0x00000004 },
+ { 0x05432000, 0x00000004 },
+ { 0x00022000, 0x00000004 },
+ { 0x4ccce063, 0x00000030 },
+ { 0x08274565, 0x00000004 },
+ { 0x00000063, 0x00000030 },
+ { 0x08004564, 0x00000004 },
+ { 0x0000e566, 0x00000004 },
+ { 0x0000005a, 0x00000008 },
+ { 0x00802066, 0x00000010 },
+ { 0x00202000, 0x00000004 },
+ { 0x001b00ff, 0x00000004 },
+ { 0x01000069, 0x00000010 },
+ { 0x001f2000, 0x00000004 },
+ { 0x001c00ff, 0x00000004 },
+ { 0000000000, 0x0000000c },
+ { 0x00000085, 0x00000030 },
+ { 0x0000005a, 0x00000008 },
+ { 0x0000e576, 0x00000004 },
+ { 0x000ca000, 0x00000004 },
+ { 0x00012000, 0x00000004 },
+ { 0x00082000, 0x00000004 },
+ { 0x1800650e, 0x00000004 },
+ { 0x00092000, 0x00000004 },
+ { 0x000a2000, 0x00000004 },
+ { 0x000f0000, 0x00000004 },
+ { 0x00400000, 0x00000004 },
+ { 0x00000079, 0x00000018 },
+ { 0x0000e563, 0x00000004 },
+ { 0x00c0e5f9, 0x000000c2 },
+ { 0x0000006e, 0x00000008 },
+ { 0x0000a06e, 0x00000008 },
+ { 0x0000e576, 0x00000004 },
+ { 0x0000e577, 0x00000004 },
+ { 0x0000e50e, 0x00000004 },
+ { 0x0000e50f, 0x00000004 },
+ { 0x0140a000, 0x00000004 },
+ { 0x0000007c, 0x00000018 },
+ { 0x00c0e5f9, 0x000000c2 },
+ { 0x0000007c, 0x00000008 },
+ { 0x0014e50e, 0x00000004 },
+ { 0x0040e50f, 0x00000004 },
+ { 0x00c0007f, 0x00000008 },
+ { 0x0000e570, 0x00000004 },
+ { 0x0000e571, 0x00000004 },
+ { 0x0000e572, 0x0000000c },
+ { 0x0000a000, 0x00000004 },
+ { 0x0140a000, 0x00000004 },
+ { 0x0000e568, 0x00000004 },
+ { 0x000c2000, 0x00000004 },
+ { 0x00000089, 0x00000018 },
+ { 0x000b0000, 0x00000004 },
+ { 0x18c0e562, 0x00000004 },
+ { 0x0000008b, 0x00000008 },
+ { 0x00c0008a, 0x00000008 },
+ { 0x000700e4, 0x00000004 },
+ { 0x00000097, 0x00000038 },
+ { 0x000ca099, 0x00000030 },
+ { 0x080045bb, 0x00000004 },
+ { 0x000c209a, 0x00000030 },
+ { 0x0800e5bc, 0000000000 },
+ { 0x0000e5bb, 0x00000004 },
+ { 0x0000e5bc, 0000000000 },
+ { 0x00120000, 0x0000000c },
+ { 0x00120000, 0x00000004 },
+ { 0x001b0002, 0x0000000c },
+ { 0x0000a000, 0x00000004 },
+ { 0x0000e821, 0x00000004 },
+ { 0x0000e800, 0000000000 },
+ { 0x0000e821, 0x00000004 },
+ { 0x0000e82e, 0000000000 },
+ { 0x02cca000, 0x00000004 },
+ { 0x00140000, 0x00000004 },
+ { 0x000ce1cc, 0x00000004 },
+ { 0x050de1cd, 0x00000004 },
+ { 0x000000a7, 0x00000020 },
+ { 0x4200e000, 0000000000 },
+ { 0x000000ae, 0x00000038 },
+ { 0x000ca000, 0x00000004 },
+ { 0x00140000, 0x00000004 },
+ { 0x000c2000, 0x00000004 },
+ { 0x00160000, 0x00000004 },
+ { 0x700ce000, 0x00000004 },
+ { 0x001400aa, 0x00000008 },
+ { 0x4000e000, 0000000000 },
+ { 0x02400000, 0x00000004 },
+ { 0x400ee000, 0x00000004 },
+ { 0x02400000, 0x00000004 },
+ { 0x4000e000, 0000000000 },
+ { 0x000c2000, 0x00000004 },
+ { 0x0240e51b, 0x00000004 },
+ { 0x0080e50a, 0x00000005 },
+ { 0x0080e50b, 0x00000005 },
+ { 0x00220000, 0x00000004 },
+ { 0x000700e4, 0x00000004 },
+ { 0x000000c1, 0x00000038 },
+ { 0x000c209a, 0x00000030 },
+ { 0x0880e5bd, 0x00000005 },
+ { 0x000c2099, 0x00000030 },
+ { 0x0800e5bb, 0x00000005 },
+ { 0x000c209a, 0x00000030 },
+ { 0x0880e5bc, 0x00000005 },
+ { 0x000000c4, 0x00000008 },
+ { 0x0080e5bd, 0x00000005 },
+ { 0x0000e5bb, 0x00000005 },
+ { 0x0080e5bc, 0x00000005 },
+ { 0x00210000, 0x00000004 },
+ { 0x02800000, 0x00000004 },
+ { 0x00c000c8, 0x00000018 },
+ { 0x4180e000, 0x00000040 },
+ { 0x000000ca, 0x00000024 },
+ { 0x01000000, 0x0000000c },
+ { 0x0100e51d, 0x0000000c },
+ { 0x000045bb, 0x00000004 },
+ { 0x000080c4, 0x00000008 },
+ { 0x0000f3ce, 0x00000004 },
+ { 0x0140a000, 0x00000004 },
+ { 0x00cc2000, 0x00000004 },
+ { 0x08c053cf, 0x00000040 },
+ { 0x00008000, 0000000000 },
+ { 0x0000f3d2, 0x00000004 },
+ { 0x0140a000, 0x00000004 },
+ { 0x00cc2000, 0x00000004 },
+ { 0x08c053d3, 0x00000040 },
+ { 0x00008000, 0000000000 },
+ { 0x0000f39d, 0x00000004 },
+ { 0x0140a000, 0x00000004 },
+ { 0x00cc2000, 0x00000004 },
+ { 0x08c0539e, 0x00000040 },
+ { 0x00008000, 0000000000 },
+ { 0x03c00830, 0x00000004 },
+ { 0x4200e000, 0000000000 },
+ { 0x0000a000, 0x00000004 },
+ { 0x200045e0, 0x00000004 },
+ { 0x0000e5e1, 0000000000 },
+ { 0x00000001, 0000000000 },
+ { 0x000700e1, 0x00000004 },
+ { 0x0800e394, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+ { 0000000000, 0000000000 },
+};
+
+static int RADEON_READ_PLL(drm_device_t *dev, int addr)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
+ return (RADEON_READ(RADEON_CLOCK_CNTL_DATA));
+}
+
+static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
+{
+ RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
+ return (RADEON_READ(RADEON_PCIE_DATA));
+}
+
+#if RADEON_FIFO_DEBUG
+static void radeon_status(drm_radeon_private_t *dev_priv)
+{
+ cmn_err(CE_NOTE, "RBBM_STATUS = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
+ cmn_err(CE_NOTE, "CP_RB_RTPR = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
+ cmn_err(CE_NOTE, "CP_RB_WTPR = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
+ cmn_err(CE_NOTE, "AIC_CNTL = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
+ cmn_err(CE_NOTE, "AIC_STAT = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_STAT));
+ cmn_err(CE_NOTE, "AIC_PT_BASE = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
+ cmn_err(CE_NOTE, "TLB_ADDR = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
+ cmn_err(CE_NOTE, "TLB_DATA = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
+}
+#endif
+
+/*
+ * Engine, FIFO control
+ */
+
+static int radeon_do_pixcache_flush(drm_radeon_private_t *dev_priv)
+{
+ u32 tmp;
+ int i;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
+ tmp |= RADEON_RB3D_DC_FLUSH_ALL;
+ RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT) &
+ RADEON_RB3D_DC_BUSY)) {
+ return (0);
+ }
+ DRM_UDELAY(1);
+ }
+
+#if RADEON_FIFO_DEBUG
+ DRM_ERROR("failed!\n");
+ radeon_status(dev_priv);
+#endif
+ return (EBUSY);
+}
+
+static int radeon_do_wait_for_fifo(drm_radeon_private_t *dev_priv, int entries)
+{
+ int i;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ int slots = (RADEON_READ(RADEON_RBBM_STATUS) &
+ RADEON_RBBM_FIFOCNT_MASK);
+ if (slots >= entries)
+ return (0);
+ DRM_UDELAY(1);
+ }
+
+
+ DRM_ERROR("radeon_do_wait_for_fifo: failed timeout=%d",
+ dev_priv->usec_timeout);
+#if RADEON_FIFO_DEBUG
+ radeon_status(dev_priv);
+#endif
+ return (EBUSY);
+}
+
+static int radeon_do_wait_for_idle(drm_radeon_private_t *dev_priv)
+{
+ int i, ret;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ ret = radeon_do_wait_for_fifo(dev_priv, 64);
+ if (ret)
+ return (ret);
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ if (!(RADEON_READ(RADEON_RBBM_STATUS) &
+ RADEON_RBBM_ACTIVE)) {
+ (void) radeon_do_pixcache_flush(dev_priv);
+ return (0);
+ }
+ DRM_UDELAY(1);
+ }
+
+ DRM_ERROR("radeon_do_wait_for_idle: failed timeout=%d",
+ dev_priv->usec_timeout);
+
+#if RADEON_FIFO_DEBUG
+ radeon_status(dev_priv);
+#endif
+ return (EBUSY);
+}
+
+/*
+ * CP control, initialization
+ */
+
+/* Load the microcode for the CP */
+static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+ int i;
+
+ (void) radeon_do_wait_for_idle(dev_priv);
+
+ RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
+
+ if (dev_priv->microcode_version == UCODE_R200) {
+ DRM_INFO("Loading R200 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+ R200_cp_microcode[i][1]);
+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+ R200_cp_microcode[i][0]);
+ }
+ } else if (dev_priv->microcode_version == UCODE_R300) {
+ DRM_INFO("Loading R300 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+ R300_cp_microcode[i][1]);
+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+ R300_cp_microcode[i][0]);
+ }
+ } else {
+ for (i = 0; i < 256; i++) {
+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+ radeon_cp_microcode[i][1]);
+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+ radeon_cp_microcode[i][0]);
+ }
+ }
+}
+
+/*
+ * Flush any pending commands to the CP. This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+/*ARGSUSED*/
+static void radeon_do_cp_flush(drm_radeon_private_t *dev_priv)
+{
+ DRM_DEBUG("\n");
+
+#if 0
+ u32 tmp;
+
+ tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
+ RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
+#endif
+}
+
+/* Wait for the CP to go idle. */
+int
+radeon_do_cp_idle(drm_radeon_private_t *dev_priv)
+{
+ RING_LOCALS;
+
+ BEGIN_RING(6);
+
+ RADEON_PURGE_CACHE();
+ RADEON_PURGE_ZCACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ return (radeon_do_wait_for_idle(dev_priv));
+}
+
+/* Start the Command Processor. */
+static void radeon_do_cp_start(drm_radeon_private_t *dev_priv)
+{
+ RING_LOCALS;
+
+ (void) radeon_do_wait_for_idle(dev_priv);
+
+ RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
+
+ dev_priv->cp_running = 1;
+
+ BEGIN_RING(6);
+
+ RADEON_PURGE_CACHE();
+ RADEON_PURGE_ZCACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+ COMMIT_RING();
+}
+
+/*
+ * Reset the Command Processor. This will not flush any pending
+ * commands, so you must wait for the CP command stream to complete
+ * before calling this routine.
+ */
+static void radeon_do_cp_reset(drm_radeon_private_t *dev_priv)
+{
+ u32 cur_read_ptr;
+ DRM_DEBUG("\n");
+
+ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+ RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+ SET_RING_HEAD(dev_priv, cur_read_ptr);
+ dev_priv->ring.tail = cur_read_ptr;
+}
+
+/*
+ * Stop the Command Processor. This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CP
+ * to go idle before calling this routine.
+ */
+static void radeon_do_cp_stop(drm_radeon_private_t *dev_priv)
+{
+ DRM_DEBUG("\n");
+
+ RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
+
+ dev_priv->cp_running = 0;
+}
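+
+/*
+ * Illustrative usage sketch, not part of the original code: per the
+ * comments above, a clean CP shutdown would be ordered as
+ *
+ *	radeon_do_cp_flush(dev_priv);
+ *	(void) radeon_do_cp_idle(dev_priv);
+ *	radeon_do_cp_stop(dev_priv);
+ *
+ * i.e. flush the stream, wait for idle, then stop the processor.
+ */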
+
+/* Reset the engine. This will stop the CP if it is running. */
+static int radeon_do_engine_reset(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset;
+ DRM_DEBUG("\n");
+
+ (void) radeon_do_pixcache_flush(dev_priv);
+
+ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
+ mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
+
+ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
+ RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_MCLKB |
+ RADEON_FORCEON_YCLKA |
+ RADEON_FORCEON_YCLKB |
+ RADEON_FORCEON_MC |
+ RADEON_FORCEON_AIC));
+
+ rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
+ RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB));
+ (void) RADEON_READ(RADEON_RBBM_SOFT_RESET);
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
+ ~(RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB)));
+ (void) RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+
+ /* Reset the CP ring */
+ radeon_do_cp_reset(dev_priv);
+
+ /* The CP is no longer running after an engine reset */
+ dev_priv->cp_running = 0;
+
+ /* Reset any pending vertex, indirect buffers */
+ radeon_freelist_reset(dev);
+
+ return (0);
+}
+
+static void
+radeon_cp_init_ring_buffer(drm_device_t *dev, drm_radeon_private_t *dev_priv)
+{
+ u32 ring_start, cur_read_ptr;
+ u32 tmp;
+
+ /*
+ * Initialize the memory controller. With the new memory map, the
+ * fb location is not changed; it should already have been properly
+ * initialized. Part of the problem is that the code below is
+ * bogus: it assumes the GART is always appended to the fb, which
+ * is not necessarily the case.
+ */
+ if (!dev_priv->new_memmap)
+ RADEON_WRITE(RADEON_MC_FB_LOCATION,
+ ((dev_priv->gart_vm_start - 1) & 0xffff0000) |
+ (dev_priv->fb_location >> 16));
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
+ RADEON_WRITE(RADEON_MC_AGP_LOCATION,
+ (((dev_priv->gart_vm_start - 1 +
+ dev_priv->gart_size) & 0xffff0000) |
+ (dev_priv->gart_vm_start >> 16)));
+
+ ring_start = dev_priv->cp_ring->offset -
+ dev->agp->base + dev_priv->gart_vm_start;
+ } else
+#endif
+ ring_start = (dev_priv->cp_ring->offset -
+ (unsigned long)dev->sg->virtual +
+ dev_priv->gart_vm_start);
+
+ RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
+
+ /* Set the write pointer delay */
+ RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+ RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+ SET_RING_HEAD(dev_priv, cur_read_ptr);
+ dev_priv->ring.tail = cur_read_ptr;
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
+ dev_priv->ring_rptr->offset -
+ dev->agp->base + dev_priv->gart_vm_start);
+ } else
+#endif
+ {
+ drm_sg_mem_t *entry = dev->sg;
+ unsigned long tmp_ofs, page_ofs;
+
+ tmp_ofs = dev_priv->ring_rptr->offset -
+ (unsigned long)dev->sg->virtual;
+ page_ofs = tmp_ofs >> PAGE_SHIFT;
+
+ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
+ DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
+ (unsigned long)entry->busaddr[page_ofs],
+ entry->handle + tmp_ofs);
+ }
+
+ /* Set ring buffer size */
+#ifdef __BIG_ENDIAN
+ RADEON_WRITE(RADEON_CP_RB_CNTL,
+ dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
+#else
+ RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
+#endif
+
+ /* Start by assuming that writeback doesn't work */
+ dev_priv->writeback_works = 0;
+
+ /*
+ * Initialize the scratch register pointer. This will cause
+ * the scratch register values to be written out to memory
+ * whenever they are updated.
+ *
+ * We simply put this behind the ring read pointer; this works
+ * with PCI GART as well as (whatever kind of) AGP GART.
+ */
+ RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
+ + RADEON_SCRATCH_REG_OFFSET);
+
+ dev_priv->scratch = ((__volatile__ u32 *)
+ dev_priv->ring_rptr->handle +
+ (RADEON_SCRATCH_REG_OFFSET / sizeof (u32)));
+
+ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
+
+ /* Turn on bus mastering */
+ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+
+ dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
+ RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
+
+ dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
+ RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
+ dev_priv->sarea_priv->last_dispatch);
+
+ dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
+ RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
+
+ (void) radeon_do_wait_for_idle(dev_priv);
+
+ /* Sync everything up */
+ RADEON_WRITE(RADEON_ISYNC_CNTL,
+ (RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI));
+
+}
+
+static void radeon_test_writeback(drm_radeon_private_t *dev_priv)
+{
+#if 0
+ u32 tmp;
+
+ /*
+ * Writeback doesn't seem to work everywhere; test it here and
+ * possibly enable it if it appears to work.
+ */
+ DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
+ RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
+
+ for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+ if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
+ 0xdeadbeef)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ tmp = dev_priv->usec_timeout;
+
+ if (tmp < dev_priv->usec_timeout) {
+ dev_priv->writeback_works = 1;
+ DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+ } else {
+ dev_priv->writeback_works = 0;
+ DRM_INFO("writeback test failed\n");
+ }
+ if (radeon_no_wb == 1) {
+ dev_priv->writeback_works = 0;
+ DRM_INFO("writeback forced off\n");
+ }
+#else
+ /*
+ * Writeback doesn't work everywhere, and the timeout is long
+ * enough that the Xserver needs more time to start than dtlogin
+ * is willing to wait. dtlogin just stops the Xserver and restarts
+ * it, so the Xserver never gets to start. Therefore, we simply
+ * ignore writeback here.
+ */
+ dev_priv->writeback_works = 0;
+#endif
+
+ if (!dev_priv->writeback_works) {
+ /*
+ * Disable writeback to avoid unnecessary bus master
+ * transfers
+ */
+ RADEON_WRITE(RADEON_CP_RB_CNTL,
+ RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE);
+ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
+ }
+
+}
+
+/* Enable or disable PCI-E GART on the chip */
+static void radeon_set_pciegart(drm_radeon_private_t *dev_priv, int on)
+{
+ u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
+ if (on) {
+
+ DRM_DEBUG("programming pcie %08X %08lX %08X\n",
+ dev_priv->gart_vm_start,
+ (long)dev_priv->gart_info.bus_addr,
+ dev_priv->gart_size);
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
+ dev_priv->gart_vm_start);
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
+ dev_priv->gart_info.bus_addr);
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
+ dev_priv->gart_vm_start);
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
+ dev_priv->gart_vm_start + dev_priv->gart_size - 1);
+
+ RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
+
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+ RADEON_PCIE_TX_GART_EN);
+ } else {
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+ tmp & ~RADEON_PCIE_TX_GART_EN);
+ }
+}
+
+/* Enable or disable PCI GART on the chip */
+static void radeon_set_pcigart(drm_radeon_private_t *dev_priv, int on)
+{
+ u32 tmp;
+
+ if (dev_priv->flags & RADEON_IS_PCIE) {
+ radeon_set_pciegart(dev_priv, on);
+ return;
+ }
+
+ tmp = RADEON_READ(RADEON_AIC_CNTL);
+
+ if (on) {
+ RADEON_WRITE(RADEON_AIC_CNTL,
+ tmp | RADEON_PCIGART_TRANSLATE_EN);
+
+ /* set PCI GART page-table base address */
+ RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
+
+ /* set address range for PCI address translate */
+ RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
+ RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start +
+ dev_priv->gart_size - 1);
+
+ /* Turn off AGP aperture -- is this required for PCI GART? */
+ RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
+ RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
+ } else {
+ RADEON_WRITE(RADEON_AIC_CNTL,
+ tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+ }
+}
+
+static int radeon_do_init_cp(drm_device_t *dev, drm_radeon_init_t *init)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ /* if we require the new memory map but don't have it, fail */
+ if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
+ DRM_ERROR("Cannot initialise DRM on this card\n"
+ "This card requires a new X.org DDX for 3D\n");
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+
+ if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
+ DRM_DEBUG("Forcing AGP card to PCI mode\n");
+ dev_priv->flags &= ~RADEON_IS_AGP;
+ } else if (!(dev_priv->flags &
+ (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) &&
+ !init->is_pci) {
+ DRM_DEBUG("Restoring AGP flag\n");
+ dev_priv->flags |= RADEON_IS_AGP;
+ }
+
+ if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
+ DRM_ERROR("PCI GART memory not allocated!\n");
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+
+ dev_priv->usec_timeout = init->usec_timeout;
+ if (dev_priv->usec_timeout < 1 ||
+ dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+
+ switch (init->func) {
+ case RADEON_INIT_R200_CP:
+ dev_priv->microcode_version = UCODE_R200;
+ break;
+ case RADEON_INIT_R300_CP:
+ dev_priv->microcode_version = UCODE_R300;
+ break;
+ default:
+ dev_priv->microcode_version = UCODE_R100;
+ }
+
+ dev_priv->do_boxes = 0;
+ dev_priv->cp_mode = init->cp_mode;
+
+ /*
+ * We don't support anything other than bus-mastering ring mode,
+ * but the ring can be in either AGP or PCI space for the ring
+ * read pointer.
+ */
+ if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
+ (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
+ DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+
+ switch (init->fb_bpp) {
+ case 16:
+ dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+ break;
+ case 32:
+ default:
+ dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+ break;
+ }
+ dev_priv->front_offset = init->front_offset;
+ dev_priv->front_pitch = init->front_pitch;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->back_pitch = init->back_pitch;
+
+ switch (init->depth_bpp) {
+ case 16:
+ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
+ break;
+ case 32:
+ default:
+ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
+ break;
+ }
+ dev_priv->depth_offset = init->depth_offset;
+ dev_priv->depth_pitch = init->depth_pitch;
+
+ /*
+ * Hardware state for depth clears. Remove this if/when we no
+ * longer clear the depth buffer with a 3D rectangle. Hard-code
+ * all values to prevent unwanted 3D state from slipping through
+ * and screwing with the clear operation.
+ */
+ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
+ (dev_priv->color_fmt << 10) |
+ (dev_priv->microcode_version ==
+ UCODE_R100 ? RADEON_ZBLOCK16 : 0));
+
+ dev_priv->depth_clear.rb3d_zstencilcntl =
+ (dev_priv->depth_fmt |
+ RADEON_Z_TEST_ALWAYS |
+ RADEON_STENCIL_TEST_ALWAYS |
+ RADEON_STENCIL_S_FAIL_REPLACE |
+ RADEON_STENCIL_ZPASS_REPLACE |
+ RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
+
+ dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
+ RADEON_BFACE_SOLID |
+ RADEON_FFACE_SOLID |
+ RADEON_FLAT_SHADE_VTX_LAST |
+ RADEON_DIFFUSE_SHADE_FLAT |
+ RADEON_ALPHA_SHADE_FLAT |
+ RADEON_SPECULAR_SHADE_FLAT |
+ RADEON_FOG_SHADE_FLAT |
+ RADEON_VTX_PIX_CENTER_OGL |
+ RADEON_ROUND_MODE_TRUNC |
+ RADEON_ROUND_PREC_8TH_PIX);
+
+ DRM_GETSAREA();
+
+ dev_priv->ring_offset = init->ring_offset;
+ dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+ dev_priv->buffers_offset = init->buffers_offset;
+ dev_priv->gart_textures_offset = init->gart_textures_offset;
+
+ if (!dev_priv->sarea) {
+ DRM_ERROR("could not find sarea!\n");
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+
+ dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+ if (!dev_priv->cp_ring) {
+ DRM_ERROR("could not find cp ring region, offset=0x%lx\n",
+ init->ring_offset);
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+ dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+ if (!dev_priv->ring_rptr) {
+ DRM_ERROR("could not find ring read pointer, offset=0x%lx\n",
+ init->ring_rptr_offset);
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+ dev->agp_buffer_token = init->buffers_offset;
+ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+ if (!dev->agp_buffer_map) {
+ DRM_ERROR("could not find dma buffer region, offset=0x%lx\n",
+ init->buffers_offset);
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+
+ if (init->gart_textures_offset) {
+ dev_priv->gart_textures =
+ drm_core_findmap(dev, init->gart_textures_offset);
+ if (!dev_priv->gart_textures) {
+ DRM_ERROR("could not find GART texture region, "
+ "offset=0x%lx\n", init->gart_textures_offset);
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+ }
+
+ dev_priv->sarea_priv = (drm_radeon_sarea_t *)(uintptr_t)
+ ((u8 *)(uintptr_t)dev_priv->sarea->handle +
+ init->sarea_priv_offset);
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ drm_core_ioremap(dev_priv->cp_ring, dev);
+ drm_core_ioremap(dev_priv->ring_rptr, dev);
+ drm_core_ioremap(dev->agp_buffer_map, dev);
+ if (!dev_priv->cp_ring->handle ||
+ !dev_priv->ring_rptr->handle ||
+ !dev->agp_buffer_map->handle) {
+ DRM_ERROR("radeon_do_init_cp: failed to find agp "
+ "regions,"
+ "cp_ring=0x%x, ring_rptr=0x%x, agp_buf=0x%x",
+ dev_priv->cp_ring->handle,
+ dev_priv->ring_rptr->handle,
+ dev->agp_buffer_map->handle);
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+ } else
+#endif
+ {
+ dev_priv->cp_ring->handle =
+ (void *)(intptr_t)dev_priv->cp_ring->offset;
+ dev_priv->ring_rptr->handle =
+ (void *)(intptr_t)dev_priv->ring_rptr->offset;
+ dev->agp_buffer_map->handle =
+ (void *)(intptr_t)dev->agp_buffer_map->offset;
+
+ DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
+ dev_priv->cp_ring->handle);
+ DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
+ dev_priv->ring_rptr->handle);
+ DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
+ dev->agp_buffer_map->handle);
+ }
+
+ dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION) &
+ 0xffff) << 16;
+ dev_priv->fb_size =
+ ((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000)
+ - dev_priv->fb_location;
+
+ dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
+ ((dev_priv->front_offset + dev_priv->fb_location) >> 10));
+
+ dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
+ ((dev_priv->back_offset + dev_priv->fb_location) >> 10));
+
+ dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
+ ((dev_priv->depth_offset + dev_priv->fb_location) >> 10));
+
+ dev_priv->gart_size = init->gart_size;
+
+ /* Now let's set the memory map ... */
+ if (dev_priv->new_memmap) {
+ u32 base = 0;
+
+ DRM_INFO("Setting GART location based on new memory map\n");
+
+ /*
+ * If using AGP, try to locate the AGP aperture at the same
+ * location in the card and on the bus, though we have to
+ * align it down.
+ */
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ base = dev->agp->base;
+ /* Check if valid */
+ if ((base + dev_priv->gart_size - 1) >=
+ dev_priv->fb_location &&
+ base < (dev_priv->fb_location +
+ dev_priv->fb_size - 1)) {
+ DRM_INFO("Can't use AGP base @0x%08lx,"
+ "won't fit\n", dev->agp->base);
+ base = 0;
+ }
+ }
+#endif
+ /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+ if (base == 0) {
+ base = dev_priv->fb_location + dev_priv->fb_size;
+ if (base < dev_priv->fb_location ||
+ ((base + dev_priv->gart_size) &
+ 0xfffffffful) < base)
+ base = dev_priv->fb_location -
+ dev_priv->gart_size;
+ }
+ dev_priv->gart_vm_start = base & 0xffc00000u;
+ if (dev_priv->gart_vm_start != base)
+ DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+ base, dev_priv->gart_vm_start);
+ } else {
+ DRM_INFO("Setting GART location based on old memory map\n");
+ dev_priv->gart_vm_start = dev_priv->fb_location +
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);
+ }
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP)
+ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset -
+ dev->agp->base + dev_priv->gart_vm_start);
+ else
+#endif
+ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset -
+ (unsigned long)dev->sg->virtual + dev_priv->gart_vm_start);
+
+ DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
+ DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
+ DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
+ dev_priv->gart_buffers_offset);
+
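+	/*
+	 * size_l2qw is the ring size as log2 of quad-words; tail_mask
+	 * wraps the dword write index.  E.g. (hypothetical) a 1MB ring
+	 * gives size_l2qw = drm_order(1MB / 8) = 17 and
+	 * tail_mask = (1MB / 4) - 1 = 0x3ffff.
+	 */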
+ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
+ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle +
+ init->ring_size / sizeof (u32));
+ dev_priv->ring.size = init->ring_size;
+ dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof (u32)) - 1;
+
+ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ /* Turn off PCI GART */
+ radeon_set_pcigart(dev_priv, 0);
+ } else
+#endif
+ {
+ /* if we have an offset set from userspace */
+ if (dev_priv->pcigart_offset) {
+ dev_priv->gart_info.bus_addr =
+ dev_priv->pcigart_offset + dev_priv->fb_location;
+ dev_priv->gart_info.mapping.offset =
+ dev_priv->gart_info.bus_addr;
+ dev_priv->gart_info.mapping.size =
+ RADEON_PCIGART_TABLE_SIZE;
+
+ drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
+ dev_priv->gart_info.addr =
+ dev_priv->gart_info.mapping.handle;
+
+ dev_priv->gart_info.is_pcie =
+ !!(dev_priv->flags & RADEON_IS_PCIE);
+ dev_priv->gart_info.gart_table_location =
+ DRM_ATI_GART_FB;
+
+ DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
+ dev_priv->gart_info.addr, dev_priv->pcigart_offset);
+ } else {
+ dev_priv->gart_info.gart_table_location =
+ DRM_ATI_GART_MAIN;
+ dev_priv->gart_info.addr = NULL;
+ dev_priv->gart_info.bus_addr = 0;
+ if (dev_priv->flags & RADEON_IS_PCIE) {
+ DRM_ERROR("Cannot use PCI Express without "
+ "GART in FB memory\n");
+ (void) radeon_do_cleanup_cp(dev);
+ return (EINVAL);
+ }
+ }
+
+ if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
+ DRM_ERROR("failed to init PCI GART!\n");
+ (void) radeon_do_cleanup_cp(dev);
+ return (ENOMEM);
+ }
+
+ /* Turn on PCI GART */
+ radeon_set_pcigart(dev_priv, 1);
+ }
+
+ radeon_cp_load_microcode(dev_priv);
+ radeon_cp_init_ring_buffer(dev, dev_priv);
+
+ dev_priv->last_buf = 0;
+
+ (void) radeon_do_engine_reset(dev);
+ radeon_test_writeback(dev_priv);
+
+ return (0);
+}
+
+static int radeon_do_cleanup_cp(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ /*
+	 * Make sure interrupts are disabled here, because the uninstall
+	 * ioctl may not have been called from userspace; once dev_private
+	 * is freed, it's too late.
+ */
+ if (dev->irq_enabled)
+ (void) drm_irq_uninstall(dev);
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ if (dev_priv->cp_ring != NULL) {
+ drm_core_ioremapfree(dev_priv->cp_ring, dev);
+ dev_priv->cp_ring = NULL;
+ }
+ if (dev_priv->ring_rptr != NULL) {
+ drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+ dev_priv->ring_rptr = NULL;
+ }
+ if (dev->agp_buffer_map != NULL) {
+ drm_core_ioremapfree(dev->agp_buffer_map, dev);
+ dev->agp_buffer_map = NULL;
+ }
+ } else
+#endif
+ {
+
+ if (dev_priv->gart_info.bus_addr) {
+ /* Turn off PCI GART */
+ radeon_set_pcigart(dev_priv, 0);
+ if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
+ DRM_ERROR("failed to cleanup PCI GART!\n");
+ }
+
+ if (dev_priv->gart_info.gart_table_location ==
+ DRM_ATI_GART_FB) {
+ drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+ dev_priv->gart_info.addr = 0;
+ }
+ }
+ /* only clear to the start of flags */
+ (void) memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
+
+ return (0);
+}
+
+/*
+ * This code will reinit the Radeon CP hardware after a resume from disk.
+ * AFAIK, it would be very difficult to pickle the state at suspend time, so
+ * here we make sure that all Radeon hardware initialisation is re-done without
+ * affecting running applications.
+ *
+ * Charl P. Botha <http://cpbotha.net>
+ */
+static int radeon_do_resume_cp(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv) {
+ DRM_ERROR("Called with no initialization\n");
+ return (EINVAL);
+ }
+
+ DRM_DEBUG("Starting radeon_do_resume_cp()\n");
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ /* Turn off PCI GART */
+ radeon_set_pcigart(dev_priv, 0);
+ } else
+#endif
+ {
+ /* Turn on PCI GART */
+ radeon_set_pcigart(dev_priv, 1);
+ }
+
+ radeon_cp_load_microcode(dev_priv);
+ radeon_cp_init_ring_buffer(dev, dev_priv);
+
+ (void) radeon_do_engine_reset(dev);
+
+ DRM_DEBUG("radeon_do_resume_cp() complete\n");
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+radeon_cp_init(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_init_t init;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
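+	/*
+	 * A 32-bit client on a 64-bit kernel hands us a struct whose
+	 * unsigned long members are only 4 bytes wide, so copy in the
+	 * ILP32 layout and widen it field by field.
+	 */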
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_init_32_t init32;
+
+ DRM_COPYFROM_WITH_RETURN(&init32, (void *) data,
+ sizeof (init32));
+ init.func = init32.func;
+ init.sarea_priv_offset = init32.sarea_priv_offset;
+ init.is_pci = init32.is_pci;
+ init.cp_mode = init32.cp_mode;
+ init.gart_size = init32.gart_size;
+ init.ring_size = init32.ring_size;
+ init.usec_timeout = init32.usec_timeout;
+ init.fb_bpp = init32.fb_bpp;
+ init.front_offset = init32.front_offset;
+ init.front_pitch = init32.front_pitch;
+ init.back_offset = init32.back_offset;
+ init.back_pitch = init32.back_pitch;
+ init.depth_bpp = init32.depth_bpp;
+ init.depth_offset = init32.depth_offset;
+ init.depth_pitch = init32.depth_pitch;
+ init.ring_offset = init32.ring_offset;
+ init.ring_rptr_offset = init32.ring_rptr_offset;
+ init.buffers_offset = init32.buffers_offset;
+ init.gart_textures_offset = init32.gart_textures_offset;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&init, (void *) data, sizeof (init));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+
+ if (init.func == RADEON_INIT_R300_CP)
+ r300_init_reg_flags();
+
+ switch (init.func) {
+ case RADEON_INIT_CP:
+ case RADEON_INIT_R200_CP:
+ case RADEON_INIT_R300_CP:
+ return (radeon_do_init_cp(dev, &init));
+ case RADEON_CLEANUP_CP:
+ return (radeon_do_cleanup_cp(dev));
+ }
+
+ return (EINVAL);
+}
+
+/*ARGSUSED*/
+int
+radeon_cp_start(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (dev_priv->cp_running) {
+ return (0);
+ }
+ if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
+ DRM_DEBUG("called with bogus CP mode (%d)\n",
+ dev_priv->cp_mode);
+ return (0);
+ }
+
+ radeon_do_cp_start(dev_priv);
+
+ return (0);
+}
+
+/*
+ * Stop the CP. The engine must have been idled before calling this
+ * routine.
+ */
+/*ARGSUSED*/
+int
+radeon_cp_stop(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_cp_stop_t stop;
+ int ret;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ DRM_COPYFROM_WITH_RETURN(&stop, (void *) data, sizeof (stop));
+
+ if (!dev_priv->cp_running)
+ return (0);
+
+ /*
+ * Flush any pending CP commands. This ensures any outstanding
+	 * commands are executed by the engine before we turn it off.
+ */
+ if (stop.flush) {
+ radeon_do_cp_flush(dev_priv);
+ }
+
+ /*
+ * If we fail to make the engine go idle, we return an error
+ * code so that the DRM ioctl wrapper can try again.
+ */
+ if (stop.idle) {
+ ret = radeon_do_cp_idle(dev_priv);
+ if (ret)
+ return (ret);
+ }
+
+ /*
+ * Finally, we can turn off the CP. If the engine isn't idle,
+ * we will get some dropped triangles as they won't be fully
+ * rendered before the CP is shut down.
+ */
+ radeon_do_cp_stop(dev_priv);
+
+ /* Reset the engine */
+ (void) radeon_do_engine_reset(dev);
+
+ return (0);
+}
+
+void
+radeon_do_release(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int i, ret;
+
+ if (dev_priv) {
+ if (dev_priv->cp_running) {
+ /* Stop the cp */
+ while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
+ DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+ schedule();
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version > 500000
+ msleep(&ret, &dev->dev_lock, PZERO,
+ "rdnrel", 1);
+#else
+#if defined(__SOLARIS__) || defined(sun)
+				/*
+				 * drv_usectohz() only converts; actually
+				 * pause briefly.
+				 */
+				DRM_UDELAY(5);
+#else
+ tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+#endif
+#endif
+ }
+ radeon_do_cp_stop(dev_priv);
+ (void) radeon_do_engine_reset(dev);
+ }
+
+ /* Disable *all* interrupts */
+ /* remove this after permanent addmaps */
+ if (dev_priv->mmio)
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+ if (dev_priv->mmio) { /* remove all surfaces */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
+ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
+ 16 * i, 0);
+ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
+ 16 * i, 0);
+ }
+ }
+
+ /* Free memory heap structures */
+ radeon_mem_takedown(&(dev_priv->gart_heap));
+ radeon_mem_takedown(&(dev_priv->fb_heap));
+
+ /* deallocate kernel resources */
+ (void) radeon_do_cleanup_cp(dev);
+ }
+}
+
+/* Just reset the CP ring. Called as part of an X Server engine reset. */
+/*ARGSUSED*/
+int
+radeon_cp_reset(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (!dev_priv) {
+ DRM_DEBUG("radeon_cp_reset called before init done\n");
+ return (EINVAL);
+ }
+
+ radeon_do_cp_reset(dev_priv);
+
+ /* The CP is no longer running after an engine reset */
+ dev_priv->cp_running = 0;
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+radeon_cp_idle(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ return (radeon_do_cp_idle(dev_priv));
+}
+
+/* Added by Charl P. Botha to call radeon_do_resume_cp(). */
+/*ARGSUSED*/
+int
+radeon_cp_resume(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+
+ return (radeon_do_resume_cp(dev));
+}
+
+/*ARGSUSED*/
+int
+radeon_engine_reset(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ return (radeon_do_engine_reset(dev));
+}
+
+/*
+ * Fullscreen mode
+ */
+
+/* KW: Deprecated to say the least: */
+/*ARGSUSED*/
+int
+radeon_fullscreen(DRM_IOCTL_ARGS)
+{
+ return (0);
+}
+
+/*
+ * Freelist management
+ */
+
+/*
+ * Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
+ * bufs until freelist code is used.  Note this hides a problem with
+ * the scratch register (used to keep track of last buffer
+ * completed) being written to before the last buffer has actually
+ * completed rendering.
+ *
+ * KW: It's also a good way to find free buffers quickly.
+ *
+ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
+ * sleep. However, bugs in older versions of radeon_accel.c mean that
+ * we essentially have to do this, else old clients will break.
+ *
+ * However, it does leave open a potential deadlock where all the
+ * buffers are held by other clients, which can't release them because
+ * they can't get the lock.
+ */
+
+drm_buf_t *
+radeon_freelist_get(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv;
+ drm_buf_t *buf;
+ int i, t;
+ int start;
+
+ if (++dev_priv->last_buf >= dma->buf_count)
+ dev_priv->last_buf = 0;
+
+ start = dev_priv->last_buf;
+
+ for (t = 0; t < dev_priv->usec_timeout; t++) {
+ u32 done_age = GET_SCRATCH(1);
+ DRM_DEBUG("done_age = %d\n", done_age);
+ for (i = start; i < dma->buf_count; i++) {
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
+ if (buf->filp == 0 || (buf->pending &&
+ buf_priv->age <= done_age)) {
+ dev_priv->stats.requested_bufs++;
+ buf->pending = 0;
+ return (buf);
+ }
+ start = 0;
+ }
+
+ if (t) {
+ DRM_UDELAY(1);
+ dev_priv->stats.freelist_loops++;
+ }
+ }
+
+ DRM_DEBUG("returning NULL!\n");
+ return (NULL);
+}
+
+#if 0
+drm_buf_t *
+radeon_freelist_get(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv;
+ drm_buf_t *buf;
+ int i, t;
+ int start;
+ u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
+
+ if (++dev_priv->last_buf >= dma->buf_count)
+ dev_priv->last_buf = 0;
+
+ start = dev_priv->last_buf;
+ dev_priv->stats.freelist_loops++;
+
+ for (t = 0; t < 2; t++) {
+ for (i = start; i < dma->buf_count; i++) {
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
+ if (buf->filp == 0 || (buf->pending &&
+ buf_priv->age <= done_age)) {
+ dev_priv->stats.requested_bufs++;
+ buf->pending = 0;
+ return (buf);
+ }
+ }
+ start = 0;
+ }
+
+ return (NULL);
+}
+#endif
+
+void
+radeon_freelist_reset(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->last_buf = 0;
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[i];
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ buf_priv->age = 0;
+ }
+}
+
+/*
+ * CP command submission
+ */
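+
+/*
+ * Free ring space is measured from tail (where the CPU writes) back
+ * around to head (where the CP reads).  E.g. (hypothetical) with a
+ * 64KB ring, head 0x100 and tail 0x3f00 (dword indices),
+ * (head - tail) * 4 goes negative, so one ring size is added back,
+ * leaving 0x800 bytes free.
+ */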
+int
+radeon_wait_ring(drm_radeon_private_t *dev_priv, int n)
+{
+ drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
+ int i;
+ u32 last_head = GET_RING_HEAD(dev_priv);
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ u32 head = GET_RING_HEAD(dev_priv);
+
+ ring->space = (head - ring->tail) * sizeof (u32);
+ if (ring->space <= 0)
+ ring->space += ring->size;
+ if (ring->space > n)
+ return (0);
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ if (head != last_head)
+ i = 0;
+ last_head = head;
+
+ DRM_UDELAY(1);
+ }
+
+ /* FIXME: This return value is ignored in the BEGIN_RING macro! */
+#if RADEON_FIFO_DEBUG
+ cmn_err(CE_WARN, "radeon_wait_ring failed\n");
+ radeon_status(dev_priv);
+ DRM_ERROR("failed!\n");
+#endif
+ return (EBUSY);
+}
+
+static int
+radeon_cp_get_buffers(drm_file_t *filp, drm_device_t *dev, drm_dma_t *d)
+{
+ int i;
+ drm_buf_t *buf;
+
+ for (i = d->granted_count; i < d->request_count; i++) {
+ buf = radeon_freelist_get(dev);
+ if (!buf)
+ return (EBUSY); /* NOTE: broken client */
+
+ buf->filp = filp;
+
+ if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+ sizeof (buf->idx)))
+ return (EFAULT);
+ if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+ sizeof (buf->total)))
+ return (EFAULT);
+
+ d->granted_count++;
+ }
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+radeon_cp_buffers(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ int ret = 0;
+ drm_dma_t d;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_dma_32_t dma32;
+
+ DRM_COPYFROM_WITH_RETURN(&dma32, (void *)data, sizeof (dma32));
+ d.context = dma32.context;
+ d.send_count = dma32.send_count;
+ d.send_indices = (void *)(uintptr_t)dma32.send_indices;
+ d.send_sizes = (void *)(uintptr_t)dma32.send_sizes;
+ d.flags = dma32.flags;
+ d.request_count = dma32.request_count;
+ d.request_size = dma32.request_size;
+ d.request_indices = (void *)(uintptr_t)dma32.request_indices;
+ d.request_sizes = (void *)(uintptr_t)dma32.request_sizes;
+ d.granted_count = dma32.granted_count;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&d, (void *)data, sizeof (d));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ /* Please don't send us buffers. */
+ if (d.send_count != 0) {
+ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+ DRM_CURRENTPID, d.send_count);
+ return (EINVAL);
+ }
+
+ /* We'll send you buffers. */
+ if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+ DRM_CURRENTPID, d.request_count, dma->buf_count);
+ return (EINVAL);
+ }
+
+ d.granted_count = 0;
+
+ if (d.request_count) {
+ ret = radeon_cp_get_buffers(fpriv, dev, &d);
+ }
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_dma_32_t dma32;
+
+ dma32.context = d.context;
+ dma32.send_count = d.send_count;
+ dma32.send_indices = (uintptr_t)d.send_indices;
+ dma32.send_sizes = (uintptr_t)d.send_sizes;
+ dma32.flags = d.flags;
+ dma32.request_count = d.request_count;
+ dma32.request_size = d.request_size;
+ dma32.request_indices = (uintptr_t)d.request_indices;
+ dma32.request_sizes = (uintptr_t)d.request_sizes;
+ dma32.granted_count = d.granted_count;
+ DRM_COPYTO_WITH_RETURN((void *)data, &dma32, sizeof (dma32));
+ } else {
+#endif
+ DRM_COPYTO_WITH_RETURN((void *)data, &d, sizeof (d));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ return (ret);
+}
+
+int
+radeon_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ drm_radeon_private_t *dev_priv;
+ int ret = 0;
+
+ dev_priv = drm_alloc(sizeof (drm_radeon_private_t), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return (ENOMEM);
+
+ (void) memset(dev_priv, 0, sizeof (drm_radeon_private_t));
+ dev->dev_private = (void *)dev_priv;
+ dev_priv->flags = (int)flags;
+
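+	/*
+	 * The low 16 bits of flags carry the chip family from the PCI ID
+	 * table; the high bits carry RADEON_IS_* chip properties (see
+	 * RADEON_FAMILY_MASK in radeon_drv.h).
+	 */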
+ switch (flags & RADEON_FAMILY_MASK) {
+ case CHIP_R100:
+ case CHIP_RV200:
+ case CHIP_R200:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_R420:
+ case CHIP_RV410:
+ dev_priv->flags |= RADEON_HAS_HIERZ;
+ break;
+ default:
+ /* all other chips have no hierarchical z buffer */
+ break;
+ }
+
+ if (drm_device_is_agp(dev))
+ dev_priv->flags |= RADEON_IS_AGP;
+ else if (drm_device_is_pcie(dev))
+ dev_priv->flags |= RADEON_IS_PCIE;
+ else
+ dev_priv->flags |= RADEON_IS_PCI;
+
+ return (ret);
+}
+
+/*
+ * Create mappings for registers and framebuffer so userland doesn't
+ * necessarily have to find them.
+ */
+int
+radeon_driver_firstopen(struct drm_device *dev)
+{
+ int ret;
+ drm_local_map_t *map;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ /* dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; */
+
+ ret = drm_addmap(dev, (uint_t)drm_get_resource_start(dev, 2),
+ (uint_t)drm_get_resource_len(dev, 2), _DRM_REGISTERS,
+ _DRM_READ_ONLY, &dev_priv->mmio);
+
+ if (ret != 0) {
+ cmn_err(CE_WARN, "radeon_driver_firstopen: "
+ "failed to mmap BAR2 addr=0x%x, len=0x%x",
+ (uint_t)drm_get_resource_start(dev, 2),
+ (uint_t)drm_get_resource_len(dev, 2));
+ return (ret);
+ }
+
+ ret = drm_addmap(dev, (uint_t)drm_get_resource_start(dev, 0),
+ (uint_t)drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
+ _DRM_WRITE_COMBINING, &map);
+ if (ret != 0)
+ return (ret);
+
+ return (0);
+}
+
+int
+radeon_driver_unload(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+ drm_free(dev_priv, sizeof (*dev_priv), DRM_MEM_DRIVER);
+
+ dev->dev_private = NULL;
+ return (0);
+}
diff --git a/usr/src/uts/intel/io/drm/radeon_drm.h b/usr/src/uts/intel/io/drm/radeon_drm.h
new file mode 100644
index 0000000..b68eff3
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/radeon_drm.h
@@ -0,0 +1,800 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifndef __RADEON_DRM_H__
+#define __RADEON_DRM_H__
+
+/*
+ * WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (radeon_sarea.h)
+ */
+#ifndef __RADEON_SAREA_DEFINES__
+#define __RADEON_SAREA_DEFINES__
+
+/*
+ * Old style state flags, required for sarea interface (1.1 and 1.2
+ * clears) and 1.2 drm_vertex2 ioctl.
+ */
+#define RADEON_UPLOAD_CONTEXT 0x00000001
+#define RADEON_UPLOAD_VERTFMT 0x00000002
+#define RADEON_UPLOAD_LINE 0x00000004
+#define RADEON_UPLOAD_BUMPMAP 0x00000008
+#define RADEON_UPLOAD_MASKS 0x00000010
+#define RADEON_UPLOAD_VIEWPORT 0x00000020
+#define RADEON_UPLOAD_SETUP 0x00000040
+#define RADEON_UPLOAD_TCL 0x00000080
+#define RADEON_UPLOAD_MISC 0x00000100
+#define RADEON_UPLOAD_TEX0 0x00000200
+#define RADEON_UPLOAD_TEX1 0x00000400
+#define RADEON_UPLOAD_TEX2 0x00000800
+#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
+#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
+#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
+#define RADEON_UPLOAD_CLIPRECTS 0x00008000
+ /* handled client-side */
+#define RADEON_REQUIRE_QUIESCENCE 0x00010000
+#define RADEON_UPLOAD_ZBIAS 0x00020000
+ /* version 1.2 and newer */
+#define RADEON_UPLOAD_ALL 0x003effff
+#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
+
+/*
+ * New style per-packet identifiers for use in cmd_buffer ioctl with
+ * the RADEON_EMIT_PACKET command. Comments relate new packets to old
+ * state bits and the packet size:
+ */
+#define RADEON_EMIT_PP_MISC 0 /* context/7 */
+#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
+#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
+#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
+#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
+#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
+#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
+#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
+#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
+#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
+#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
+#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
+#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
+#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
+#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
+#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
+#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
+#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
+#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
+#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
+#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
+#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
+#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
+#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
+#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
+#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
+#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
+#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
+#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
+#define R200_EMIT_VAP_CTL 32 /* vap/1 */
+#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
+#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
+#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
+#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
+#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
+#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
+#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
+#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
+#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
+#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
+#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
+#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
+#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
+#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
+#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
+#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
+#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
+#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
+#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
+#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
+#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
+#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
+#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
+#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
+#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
+#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
+#define R200_EMIT_PP_CUBIC_FACES_0 61
+#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
+#define R200_EMIT_PP_CUBIC_FACES_1 63
+#define R200_EMIT_PP_CUBIC_OFFSETS_1 64
+#define R200_EMIT_PP_CUBIC_FACES_2 65
+#define R200_EMIT_PP_CUBIC_OFFSETS_2 66
+#define R200_EMIT_PP_CUBIC_FACES_3 67
+#define R200_EMIT_PP_CUBIC_OFFSETS_3 68
+#define R200_EMIT_PP_CUBIC_FACES_4 69
+#define R200_EMIT_PP_CUBIC_OFFSETS_4 70
+#define R200_EMIT_PP_CUBIC_FACES_5 71
+#define R200_EMIT_PP_CUBIC_OFFSETS_5 72
+#define RADEON_EMIT_PP_TEX_SIZE_0 73
+#define RADEON_EMIT_PP_TEX_SIZE_1 74
+#define RADEON_EMIT_PP_TEX_SIZE_2 75
+#define R200_EMIT_RB3D_BLENDCOLOR 76
+#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77
+#define RADEON_EMIT_PP_CUBIC_FACES_0 78
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79
+#define RADEON_EMIT_PP_CUBIC_FACES_1 80
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81
+#define RADEON_EMIT_PP_CUBIC_FACES_2 82
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83
+#define R200_EMIT_PP_TRI_PERF_CNTL 84
+#define R200_EMIT_PP_AFS_0 85
+#define R200_EMIT_PP_AFS_1 86
+#define R200_EMIT_ATF_TFACTOR 87
+#define R200_EMIT_PP_TXCTLALL_0 88
+#define R200_EMIT_PP_TXCTLALL_1 89
+#define R200_EMIT_PP_TXCTLALL_2 90
+#define R200_EMIT_PP_TXCTLALL_3 91
+#define R200_EMIT_PP_TXCTLALL_4 92
+#define R200_EMIT_PP_TXCTLALL_5 93
+#define R200_EMIT_VAP_PVS_CNTL 94
+#define RADEON_MAX_STATE_PACKETS 95
+
+/*
+ * Commands understood by cmd_buffer ioctl. More can be added but
+ * obviously these can't be removed or changed:
+ */
+#define RADEON_CMD_PACKET 1
+ /* emit one of the register packets above */
+#define RADEON_CMD_SCALARS 2 /* emit scalar data */
+#define RADEON_CMD_VECTORS 3 /* emit vector data */
+#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
+#define RADEON_CMD_PACKET3 5 /* emit hw packet */
+#define RADEON_CMD_PACKET3_CLIP 6
+ /* emit hw packet wrapped in cliprects */
+#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
+
+/*
+ * Emit hw wait commands -- note: these make the graphics hardware
+ * wait, not the cpu.
+ */
+#define RADEON_CMD_WAIT 8
+
+#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */
+typedef union {
+ int i;
+ struct {
+ unsigned char cmd_type, pad0, pad1, pad2;
+ } header;
+ struct {
+ unsigned char cmd_type, packet_id, pad0, pad1;
+ } packet;
+ struct {
+ unsigned char cmd_type, offset, stride, count;
+ } scalars;
+ struct {
+ unsigned char cmd_type, offset, stride, count;
+ } vectors;
+ struct {
+ unsigned char cmd_type, addr_lo, addr_hi, count;
+ } veclinear;
+ struct {
+ unsigned char cmd_type, buf_idx, pad0, pad1;
+ } dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
+} drm_radeon_cmd_header_t;
+
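+/*
+ * A cmd_buffer stream is a packed sequence of these headers, each
+ * followed by its payload dwords.  A hypothetical client sketch for
+ * one register packet:
+ *
+ *	drm_radeon_cmd_header_t h;
+ *	h.i = 0;
+ *	h.packet.cmd_type = RADEON_CMD_PACKET;
+ *	h.packet.packet_id = RADEON_EMIT_SE_CNTL;
+ *
+ * followed by the two payload dwords listed for setup/2 above.
+ */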
+#define RADEON_WAIT_2D 0x1
+#define RADEON_WAIT_3D 0x2
+
+/* Allowed parameters for R300_CMD_PACKET3 */
+#define R300_CMD_PACKET3_CLEAR 0
+#define R300_CMD_PACKET3_RAW 1
+
+/*
+ * Commands understood by cmd_buffer ioctl for R300.
+ * The interface has not been stabilized, so some of these may be removed
+ * and eventually reordered before stabilization.
+ */
+#define R300_CMD_PACKET0 1
+#define R300_CMD_VPU 2 /* emit vertex program upload */
+#define R300_CMD_PACKET3 3 /* emit a packet3 */
+
+/* emit sequence ending 3d rendering */
+#define R300_CMD_END3D 4
+
+#define R300_CMD_CP_DELAY 5
+#define R300_CMD_DMA_DISCARD 6
+#define R300_CMD_WAIT 7
+#define R300_WAIT_2D 0x1
+#define R300_WAIT_3D 0x2
+#define R300_WAIT_2D_CLEAN 0x3
+#define R300_WAIT_3D_CLEAN 0x4
+#define R300_CMD_SCRATCH 8
+/*
+ * sys/user.h defines u
+ */
+typedef union {
+ unsigned int u;
+ struct {
+ unsigned char cmd_type, pad0, pad1, pad2;
+ } header;
+ struct {
+ unsigned char cmd_type, count, reglo, reghi;
+ } packet0;
+ struct {
+ unsigned char cmd_type, count, adrlo, adrhi;
+ } vpu;
+ struct {
+ unsigned char cmd_type, packet, pad0, pad1;
+ } packet3;
+ struct {
+ unsigned char cmd_type, packet;
+ unsigned short count; /* amount of packet2 to emit */
+ } delay;
+ struct {
+ unsigned char cmd_type, buf_idx, pad0, pad1;
+ } dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
+ struct {
+ unsigned char cmd_type, reg, n_bufs, flags;
+ } scratch;
+} drm_r300_cmd_header_t;
+
+#define RADEON_FRONT 0x1
+#define RADEON_BACK 0x2
+#define RADEON_DEPTH 0x4
+#define RADEON_STENCIL 0x8
+#define RADEON_CLEAR_FASTZ 0x80000000
+#define RADEON_USE_HIERZ 0x40000000
+#define RADEON_USE_COMP_ZBUF 0x20000000
+
+/* Primitive types */
+#define RADEON_POINTS 0x1
+#define RADEON_LINES 0x2
+#define RADEON_LINE_STRIP 0x3
+#define RADEON_TRIANGLES 0x4
+#define RADEON_TRIANGLE_FAN 0x5
+#define RADEON_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size */
+#define RADEON_BUFFER_SIZE 65536
+
+/* Byte offsets for indirect buffer data */
+#define RADEON_INDEX_PRIM_OFFSET 20
+
+#define RADEON_SCRATCH_REG_OFFSET 32
+
+#define RADEON_NR_SAREA_CLIPRECTS 12
+
+/*
+ * There are 2 heaps (local/GART). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define RADEON_LOCAL_TEX_HEAP 0
+#define RADEON_GART_TEX_HEAP 1
+#define RADEON_NR_TEX_HEAPS 2
+#define RADEON_NR_TEX_REGIONS 64
+#define RADEON_LOG_TEX_GRANULARITY 16
+
+#define RADEON_MAX_TEXTURE_LEVELS 12
+#define RADEON_MAX_TEXTURE_UNITS 3
+
+#define RADEON_MAX_SURFACES 8
+
+/*
+ * Blits have strict offset rules. All blit offset must be aligned on
+ * a 1K-byte boundary.
+ */
+#define RADEON_OFFSET_SHIFT 10
+#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
+#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
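+
+/*
+ * For example, a client can round an offset down to a legal blit
+ * boundary with (off & ~RADEON_OFFSET_MASK) and test alignment with
+ * ((off & RADEON_OFFSET_MASK) == 0).
+ */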
+
+#endif /* __RADEON_SAREA_DEFINES__ */
+
+typedef struct {
+ unsigned int red;
+ unsigned int green;
+ unsigned int blue;
+ unsigned int alpha;
+} radeon_color_regs_t;
+
+typedef struct {
+ /* Context state */
+ unsigned int pp_misc; /* 0x1c14 */
+ unsigned int pp_fog_color;
+ unsigned int re_solid_color;
+ unsigned int rb3d_blendcntl;
+ unsigned int rb3d_depthoffset;
+ unsigned int rb3d_depthpitch;
+ unsigned int rb3d_zstencilcntl;
+
+ unsigned int pp_cntl; /* 0x1c38 */
+ unsigned int rb3d_cntl;
+ unsigned int rb3d_coloroffset;
+ unsigned int re_width_height;
+ unsigned int rb3d_colorpitch;
+ unsigned int se_cntl;
+
+ /* Vertex format state */
+ unsigned int se_coord_fmt; /* 0x1c50 */
+
+ /* Line state */
+ unsigned int re_line_pattern; /* 0x1cd0 */
+ unsigned int re_line_state;
+
+ unsigned int se_line_width; /* 0x1db8 */
+
+ /* Bumpmap state */
+ unsigned int pp_lum_matrix; /* 0x1d00 */
+
+ unsigned int pp_rot_matrix_0; /* 0x1d58 */
+ unsigned int pp_rot_matrix_1;
+
+ /* Mask state */
+ unsigned int rb3d_stencilrefmask; /* 0x1d7c */
+ unsigned int rb3d_ropcntl;
+ unsigned int rb3d_planemask;
+
+ /* Viewport state */
+ unsigned int se_vport_xscale; /* 0x1d98 */
+ unsigned int se_vport_xoffset;
+ unsigned int se_vport_yscale;
+ unsigned int se_vport_yoffset;
+ unsigned int se_vport_zscale;
+ unsigned int se_vport_zoffset;
+
+ /* Setup state */
+ unsigned int se_cntl_status; /* 0x2140 */
+
+ /* Misc state */
+ unsigned int re_top_left; /* 0x26c0 */
+ unsigned int re_misc;
+} drm_radeon_context_regs_t;
+
+typedef struct {
+ /* Zbias state */
+ unsigned int se_zbias_factor; /* 0x1dac */
+ unsigned int se_zbias_constant;
+} drm_radeon_context2_regs_t;
+
+/* Setup registers for each texture unit */
+typedef struct {
+ unsigned int pp_txfilter;
+ unsigned int pp_txformat;
+ unsigned int pp_txoffset;
+ unsigned int pp_txcblend;
+ unsigned int pp_txablend;
+ unsigned int pp_tfactor;
+ unsigned int pp_border_color;
+} drm_radeon_texture_regs_t;
+
+typedef struct {
+ unsigned int start;
+ unsigned int finish;
+ unsigned int prim:8;
+ unsigned int stateidx:8;
+ unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
+ unsigned int vc_format; /* vertex format */
+} drm_radeon_prim_t;
+
+typedef struct {
+ drm_radeon_context_regs_t context;
+ drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
+ drm_radeon_context2_regs_t context2;
+ unsigned int dirty;
+} drm_radeon_state_t;
+
+typedef struct {
+ /*
+ * The channel for communication of state information to the
+ * kernel on firing a vertex buffer with either of the
+ * obsoleted vertex/index ioctls.
+ */
+ drm_radeon_context_regs_t context_state;
+ drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof. */
+ drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients. */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+ unsigned int last_clear;
+
+ drm_tex_region_t
+ tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + 1];
+ unsigned int tex_age[RADEON_NR_TEX_HEAPS];
+ int ctx_owner;
+	int pfState;		/* number of 3d windows (0, 1, 2 or more) */
+ int pfCurrentPage; /* which buffer is being displayed? */
+ int crtc2_base; /* CRTC2 frame offset */
+ int tiling_enabled; /* set by drm, read by 2d + 3d clients */
+} drm_radeon_sarea_t;
+
+/*
+ * WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmRadeon.h)
+ *
+ * KW: actually it's illegal to change any of this (backwards compatibility).
+ */
+
+/*
+ * Radeon specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_RADEON_CP_INIT 0x00
+#define DRM_RADEON_CP_START 0x01
+#define DRM_RADEON_CP_STOP 0x02
+#define DRM_RADEON_CP_RESET 0x03
+#define DRM_RADEON_CP_IDLE 0x04
+#define DRM_RADEON_RESET 0x05
+#define DRM_RADEON_FULLSCREEN 0x06
+#define DRM_RADEON_SWAP 0x07
+#define DRM_RADEON_CLEAR 0x08
+#define DRM_RADEON_VERTEX 0x09
+#define DRM_RADEON_INDICES 0x0A
+#define DRM_RADEON_NOT_USED
+#define DRM_RADEON_STIPPLE 0x0C
+#define DRM_RADEON_INDIRECT 0x0D
+#define DRM_RADEON_TEXTURE 0x0E
+#define DRM_RADEON_VERTEX2 0x0F
+#define DRM_RADEON_CMDBUF 0x10
+#define DRM_RADEON_GETPARAM 0x11
+#define DRM_RADEON_FLIP 0x12
+#define DRM_RADEON_ALLOC 0x13
+#define DRM_RADEON_FREE 0x14
+#define DRM_RADEON_INIT_HEAP 0x15
+#define DRM_RADEON_IRQ_EMIT 0x16
+#define DRM_RADEON_IRQ_WAIT 0x17
+#define DRM_RADEON_CP_RESUME 0x18
+#define DRM_RADEON_SETPARAM 0x19
+#define DRM_RADEON_SURF_ALLOC 0x1a
+#define DRM_RADEON_SURF_FREE 0x1b
+
+#define DRM_IOCTL_RADEON_CP_INIT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START \
+ DRM_IO(DRM_COMMAND_BASE + DRM_RADEON_CP_START)
+#define DRM_IOCTL_RADEON_CP_STOP \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET \
+ DRM_IO(DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
+#define DRM_IOCTL_RADEON_CP_IDLE \
+ DRM_IO(DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
+#define DRM_IOCTL_RADEON_RESET \
+ DRM_IO(DRM_COMMAND_BASE + DRM_RADEON_RESET)
+#define DRM_IOCTL_RADEON_FULLSCREEN \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, \
+ drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP \
+ DRM_IO(DRM_COMMAND_BASE + DRM_RADEON_SWAP)
+#define DRM_IOCTL_RADEON_CLEAR \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_STIPPLE \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
+#define DRM_IOCTL_RADEON_TEXTURE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
+#define DRM_IOCTL_RADEON_VERTEX2 \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
+#define DRM_IOCTL_RADEON_CMDBUF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
+#define DRM_IOCTL_RADEON_GETPARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
+#define DRM_IOCTL_RADEON_FLIP \
+ DRM_IO(DRM_COMMAND_BASE + DRM_RADEON_FLIP)
+#define DRM_IOCTL_RADEON_ALLOC \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
+#define DRM_IOCTL_RADEON_FREE \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
+#define DRM_IOCTL_RADEON_INIT_HEAP \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, \
+ drm_radeon_mem_init_heap_t)
+#define DRM_IOCTL_RADEON_IRQ_EMIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
+#define DRM_IOCTL_RADEON_IRQ_WAIT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
+#define DRM_IOCTL_RADEON_CP_RESUME \
+ DRM_IO(DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
+#define DRM_IOCTL_RADEON_SETPARAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
+#define DRM_IOCTL_RADEON_SURF_ALLOC \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, \
+ drm_radeon_surface_alloc_t)
+#define DRM_IOCTL_RADEON_SURF_FREE \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, \
+ drm_radeon_surface_free_t)
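+
+/*
+ * Userland reaches these through libdrm's command wrappers; a minimal
+ * hypothetical sketch (assuming the usual drmCommandNone() and
+ * drmCommandWrite() helpers) to idle and then stop the CP:
+ *
+ *	drm_radeon_cp_stop_t stop;
+ *
+ *	stop.flush = 1;
+ *	stop.idle = 1;
+ *	(void) drmCommandNone(fd, DRM_RADEON_CP_IDLE);
+ *	(void) drmCommandWrite(fd, DRM_RADEON_CP_STOP, &stop,
+ *	    sizeof (stop));
+ */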
+
+typedef struct drm_radeon_init {
+ enum {
+ RADEON_INIT_CP = 0x01,
+ RADEON_CLEANUP_CP = 0x02,
+ RADEON_INIT_R200_CP = 0x03,
+ RADEON_INIT_R300_CP = 0x04
+ } func;
+ unsigned long sarea_priv_offset;
+ int is_pci; /* for overriding only */
+ int cp_mode;
+ int gart_size;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned long fb_offset DEPRECATED; /* deprecated */
+ unsigned long mmio_offset DEPRECATED; /* deprecated */
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long gart_textures_offset;
+} drm_radeon_init_t;
+
+typedef struct drm_radeon_cp_stop {
+ int flush;
+ int idle;
+} drm_radeon_cp_stop_t;
+
+typedef struct drm_radeon_fullscreen {
+ enum {
+ RADEON_INIT_FULLSCREEN = 0x01,
+ RADEON_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_radeon_fullscreen_t;
+
+#define CLEAR_X1 0
+#define CLEAR_Y1 1
+#define CLEAR_X2 2
+#define CLEAR_Y2 3
+#define CLEAR_DEPTH 4
+
+typedef union drm_radeon_clear_rect {
+ float f[5];
+ unsigned int ui[5];
+} drm_radeon_clear_rect_t;
+
+typedef struct drm_radeon_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask; /* misnamed field: should be stencil */
+ drm_radeon_clear_rect_t __user *depth_boxes;
+} drm_radeon_clear_t;
+
+typedef struct drm_radeon_vertex {
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_radeon_vertex_t;
+
+typedef struct drm_radeon_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_radeon_indices_t;
+
+/*
+ * v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
+ * - allows multiple primitives and state changes in a single ioctl
+ * - supports driver change to emit native primitives
+ */
+typedef struct drm_radeon_vertex2 {
+ int idx; /* Index of vertex buffer */
+ int discard; /* Client finished with buffer? */
+ int nr_states;
+ drm_radeon_state_t __user *state;
+ int nr_prims;
+ drm_radeon_prim_t __user *prim;
+} drm_radeon_vertex2_t;
+
+/*
+ * v1.3 - obsoletes drm_radeon_vertex2
+ * - allows arbitrarily large cliprect list
+ * - allows updating of tcl packet, vector and scalar state
+ * - allows memory-efficient description of state updates
+ * - allows state to be emitted without a primitive
+ * (for clears, ctx switches)
+ * - allows more than one dma buffer to be referenced per ioctl
+ * - supports tcl driver
+ * - may be extended in future versions with new cmd types, packets
+ */
+typedef struct drm_radeon_cmd_buffer {
+ int bufsz;
+ char __user *buf;
+ int nbox;
+ drm_clip_rect_t __user *boxes;
+} drm_radeon_cmd_buffer_t;
+
+typedef struct drm_radeon_tex_image {
+ unsigned int x, y; /* Blit coordinates */
+ unsigned int width, height;
+ const void __user *data;
+} drm_radeon_tex_image_t;
+
+typedef struct drm_radeon_texture {
+ unsigned int offset;
+ int pitch;
+ int format;
+ int width; /* Texture image coordinates */
+ int height;
+ drm_radeon_tex_image_t __user *image;
+} drm_radeon_texture_t;
+
+typedef struct drm_radeon_stipple {
+ unsigned int __user *mask;
+} drm_radeon_stipple_t;
+
+typedef struct drm_radeon_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_radeon_indirect_t;
+
+/* enum for card type parameters */
+#define RADEON_CARD_PCI 0
+#define RADEON_CARD_AGP 1
+#define RADEON_CARD_PCIE 2
+
+/*
+ * 1.3: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+
+/* card offset of 1st GART buffer */
+#define RADEON_PARAM_GART_BUFFER_OFFSET 1
+
+#define RADEON_PARAM_LAST_FRAME 2
+#define RADEON_PARAM_LAST_DISPATCH 3
+#define RADEON_PARAM_LAST_CLEAR 4
+/* Added with DRM version 1.6. */
+#define RADEON_PARAM_IRQ_NR 5
+#define RADEON_PARAM_GART_BASE 6 /* offset of GART base */
+/* Added with DRM version 1.8. */
+#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
+#define RADEON_PARAM_STATUS_HANDLE 8
+#define RADEON_PARAM_SAREA_HANDLE 9
+#define RADEON_PARAM_GART_TEX_HANDLE 10
+#define RADEON_PARAM_SCRATCH_OFFSET 11
+#define RADEON_PARAM_CARD_TYPE 12
+#define RADEON_PARAM_VBLANK_CRTC 13
+#define RADEON_PARAM_FB_LOCATION 14
+
+typedef struct drm_radeon_getparam {
+ int param;
+ void __user *value;
+} drm_radeon_getparam_t;
+
+/* 1.6: Set up a memory manager for regions of shared memory: */
+#define RADEON_MEM_REGION_GART 1
+#define RADEON_MEM_REGION_FB 2
+
+typedef struct drm_radeon_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int __user *region_offset; /* offset from start of fb or GART */
+} drm_radeon_mem_alloc_t;
+
+typedef struct drm_radeon_mem_free {
+ int region;
+ int region_offset;
+} drm_radeon_mem_free_t;
+
+typedef struct drm_radeon_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_radeon_mem_init_heap_t;
+
+/* 1.6: Userspace can request & wait on irq's: */
+typedef struct drm_radeon_irq_emit {
+ int __user *irq_seq;
+} drm_radeon_irq_emit_t;
+
+typedef struct drm_radeon_irq_wait {
+ int irq_seq;
+} drm_radeon_irq_wait_t;
+
+/*
+ * 1.10: Clients tell the DRM where they think the framebuffer is located in
+ * the card's address space, via a new generic ioctl to set parameters
+ */
+
+typedef struct drm_radeon_setparam {
+ unsigned int param;
+ int64_t value;
+} drm_radeon_setparam_t;
+
+/* determined framebuffer location */
+#define RADEON_SETPARAM_FB_LOCATION 1
+
+/* enable/disable color tiling */
+#define RADEON_SETPARAM_SWITCH_TILING 2
+
+/* PCI Gart Location */
+#define RADEON_SETPARAM_PCIGART_LOCATION 3
+
+/* Use new memory map */
+#define RADEON_SETPARAM_NEW_MEMMAP 4
+
+/* PCI GART Table Size */
+#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5
+
+/* VBLANK CRTC */
+#define RADEON_SETPARAM_VBLANK_CRTC 6
+
+/* 1.14: Clients can allocate/free a surface */
+typedef struct drm_radeon_surface_alloc {
+ unsigned int address;
+ unsigned int size;
+ unsigned int flags;
+} drm_radeon_surface_alloc_t;
+
+typedef struct drm_radeon_surface_free {
+ unsigned int address;
+} drm_radeon_surface_free_t;
+
+#define DRM_RADEON_VBLANK_CRTC1 1
+#define DRM_RADEON_VBLANK_CRTC2 2
+
+#endif /* __RADEON_DRM_H__ */
diff --git a/usr/src/uts/intel/io/drm/radeon_drv.c b/usr/src/uts/intel/io/drm/radeon_drv.c
new file mode 100644
index 0000000..8bc827c
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/radeon_drv.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * radeon_drv.c -- ATI Radeon driver -*- linux-c -*-
+ * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
+ */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "drm_pciids.h"
+
+int radeon_no_wb = 1;
+
+/*
+ * cb_ops entrypoint
+ */
+extern struct cb_ops drm_cb_ops;
+
+/* drv_PCI_IDs comes from drm_pciids.h */
+static drm_pci_id_list_t radeon_pciidlist[] = {
+ radeon_PCI_IDS
+};
+
+/*
+ * module entrypoint
+ */
+static int radeon_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
+static int radeon_attach(dev_info_t *, ddi_attach_cmd_t);
+static int radeon_detach(dev_info_t *, ddi_detach_cmd_t);
+
+extern void radeon_init_ioctl_arrays(void);
+extern uint_t radeon_driver_irq_handler(caddr_t);
+extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);
+
+/*
+ * Local routines
+ */
+static void radeon_configure(drm_driver_t *);
+
+/*
+ * DRM driver
+ */
+static drm_driver_t radeon_driver = {0};
+
+static struct dev_ops radeon_dev_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ radeon_info, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ radeon_attach, /* devo_attach */
+ radeon_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ &drm_cb_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ NULL, /* power */
+ ddi_quiesce_not_supported, /* devo_quiesce */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* drv_modops */
+ "radeon DRM driver", /* drv_linkinfo */
+ &radeon_dev_ops, /* drv_dev_ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *) &modldrv, NULL
+};
+
+
+/*
+ * softstate head
+ */
+static void *radeon_statep;
+
+int
+_init(void)
+{
+ int error;
+
+ radeon_configure(&radeon_driver);
+
+ if ((error = ddi_soft_state_init(&radeon_statep,
+ sizeof (drm_device_t), DRM_MAX_INSTANCES)) != 0)
+ return (error);
+
+ if ((error = mod_install(&modlinkage)) != 0) {
+ ddi_soft_state_fini(&radeon_statep);
+ return (error);
+ }
+
+ return (error);
+
+} /* _init() */
+
+int
+_fini(void)
+{
+ int error;
+
+ if ((error = mod_remove(&modlinkage)) != 0)
+ return (error);
+
+ (void) ddi_soft_state_fini(&radeon_statep);
+
+ return (0);
+
+} /* _fini() */
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+
+} /* _info() */
+
+
+static int
+radeon_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ drm_device_t *statep;
+ void *handle;
+ int unit;
+
+ if (cmd != DDI_ATTACH) {
+ DRM_ERROR("radeon_attach: only attach op supported");
+ return (DDI_FAILURE);
+ }
+
+ unit = ddi_get_instance(dip);
+ if (ddi_soft_state_zalloc(radeon_statep, unit) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "radeon_attach: alloc softstate failed unit=%d", unit);
+ return (DDI_FAILURE);
+ }
+ statep = ddi_get_soft_state(radeon_statep, unit);
+ statep->dip = dip;
+ statep->driver = &radeon_driver;
+
+ /*
+ * Call drm_supp_register to create minor nodes for us
+ */
+ handle = drm_supp_register(dip, statep);
+ if (handle == NULL) {
+ DRM_ERROR("radeon_attach: drm_supp_register failed");
+ goto err_exit1;
+ }
+ statep->drm_handle = handle;
+
+ /*
+ * After drm_supp_register, we can call drm_xxx routine
+ */
+ statep->drm_supported = DRM_UNSUPPORT;
+ if (drm_probe(statep, radeon_pciidlist) != DDI_SUCCESS) {
+ DRM_ERROR("radeon_open: "
+ "DRM current don't support this graphics card");
+ goto err_exit2;
+ }
+ statep->drm_supported = DRM_SUPPORT;
+
+ /* call common attach code */
+ if (drm_attach(statep) != DDI_SUCCESS) {
+ DRM_ERROR("radeon_attach: drm_attach failed");
+ goto err_exit2;
+ }
+ return (DDI_SUCCESS);
+
+err_exit2:
+ (void) drm_supp_unregister(handle);
+err_exit1:
+ (void) ddi_soft_state_free(radeon_statep, unit);
+ return (DDI_FAILURE);
+
+} /* radeon_attach() */
+
+static int
+radeon_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ drm_device_t *statep;
+ int unit;
+
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ unit = ddi_get_instance(dip);
+ statep = ddi_get_soft_state(radeon_statep, unit);
+ if (statep == NULL)
+ return (DDI_FAILURE);
+
+ (void) drm_detach(statep);
+ (void) drm_supp_unregister(statep->drm_handle);
+ (void) ddi_soft_state_free(radeon_statep, unit);
+
+ return (DDI_SUCCESS);
+
+} /* radeon_detach() */
+
+/*ARGSUSED*/
+static int
+radeon_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+ drm_device_t *statep;
+ int error = DDI_SUCCESS;
+ int unit;
+
+ unit = drm_dev_to_instance((dev_t)arg);
+ switch (infocmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ statep = ddi_get_soft_state(radeon_statep, unit);
+ if (statep == NULL || statep->dip == NULL) {
+ error = DDI_FAILURE;
+ } else {
+ *result = (void *) statep->dip;
+ error = DDI_SUCCESS;
+ }
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)(uintptr_t)unit;
+ error = DDI_SUCCESS;
+ break;
+ default:
+ error = DDI_FAILURE;
+ break;
+ }
+ return (error);
+
+} /* radeon_info() */
+
+static void
+radeon_configure(drm_driver_t *driver)
+{
+ driver->buf_priv_size = sizeof (drm_radeon_buf_priv_t);
+ driver->load = radeon_driver_load;
+ driver->unload = radeon_driver_unload;
+ driver->firstopen = radeon_driver_firstopen;
+ driver->open = radeon_driver_open;
+ driver->preclose = radeon_driver_preclose;
+ driver->postclose = radeon_driver_postclose;
+ driver->lastclose = radeon_driver_lastclose;
+ driver->vblank_wait = radeon_driver_vblank_wait;
+ driver->vblank_wait2 = radeon_driver_vblank_wait2;
+ driver->irq_preinstall = radeon_driver_irq_preinstall;
+ driver->irq_postinstall = radeon_driver_irq_postinstall;
+ driver->irq_uninstall = radeon_driver_irq_uninstall;
+ driver->irq_handler = radeon_driver_irq_handler;
+ driver->dma_ioctl = radeon_cp_buffers;
+
+ driver->driver_ioctls = radeon_ioctls;
+ driver->max_driver_ioctl = radeon_max_ioctl;
+
+ driver->driver_name = DRIVER_NAME;
+ driver->driver_desc = DRIVER_DESC;
+ driver->driver_date = DRIVER_DATE;
+ driver->driver_major = DRIVER_MAJOR;
+ driver->driver_minor = DRIVER_MINOR;
+ driver->driver_patchlevel = DRIVER_PATCHLEVEL;
+
+ driver->use_agp = 1;
+ driver->use_mtrr = 1;
+ driver->use_pci_dma = 1;
+ driver->use_sg = 1;
+ driver->use_dma = 1;
+ driver->use_irq = 1;
+ driver->use_vbl_irq = 1;
+ driver->use_vbl_irq2 = 1;
+
+} /* radeon_configure() */
diff --git a/usr/src/uts/intel/io/drm/radeon_drv.h b/usr/src/uts/intel/io/drm/radeon_drv.h
new file mode 100644
index 0000000..0a0b046
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/radeon_drv.h
@@ -0,0 +1,1203 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __RADEON_DRV_H__
+#define __RADEON_DRV_H__
+
+/*
+ * Enable debugging output (requires a recompile):
+ *
+ * #define RADEON_FIFO_DEBUG 1
+ */
+
+/* General customization: */
+
+#define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others."
+
+#define DRIVER_NAME "radeon"
+#define DRIVER_DESC "ATI Radeon"
+#define DRIVER_DATE "20060524"
+
+/*
+ * Interface history:
+ *
+ * 1.1 - ??
+ * 1.2 - Add vertex2 ioctl (keith)
+ * - Add stencil capability to clear ioctl (gareth, keith)
+ * - Increase MAX_TEXTURE_LEVELS (brian)
+ * 1.3 - Add cmdbuf ioctl (keith)
+ * - Add support for new radeon packets (keith)
+ * - Add getparam ioctl (keith)
+ * - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
+ * 1.4 - Add scratch registers to get_param ioctl.
+ * 1.5 - Add r200 packets to cmdbuf ioctl
+ * - Add r200 function to init ioctl
+ * - Add 'scalar2' instruction to cmdbuf
+ * 1.6 - Add static GART memory manager
+ * Add irq handler (won't be turned on unless X server knows to)
+ * Add irq ioctls and irq_active getparam.
+ * Add wait command for cmdbuf ioctl
+ * Add GART offset query for getparam
+ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
+ * and R200_PP_CUBIC_OFFSET_F1_[0..5].
+ * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
+ * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
+ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
+ * Add 'GET' queries for starting additional clients on different
+ * VT's.
+ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
+ * Add texture rectangle support for r100.
+ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
+ * clients use to tell the DRM where they think the framebuffer is
+ * located in the card's address space
+ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
+ * and GL_EXT_blend_[func|equation]_separate on r200
+ * 1.12- Add R300 CP microcode support - this just loads the CP on r300
+ * (No 3D support yet - just microcode loading).
+ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
+ * - Add hyperz support, add hyperz flags to clear ioctl.
+ * 1.14- Add support for color tiling
+ * - Add R100/R200 surface allocation/free support
+ * 1.15- Add support for texture micro tiling
+ * - Add support for r100 cube maps
+ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
+ * texture filtering on r200
+ * 1.17- Add initial support for R300 (3D).
+ * 1.18- Add support for GL_ATI_fragment_shader, new packets
+ * R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
+ * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and
+ * R200_EMIT_ATF_TFACTOR
+ *	(replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6))
+ * 1.19- Add support for gart table in FB memory and PCIE r300
+ * 1.20- Add support for r300 texrect
+ * 1.21- Add support for card type getparam
+ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
+ * 1.23- Add new radeon memory map work from benh
+ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
+ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
+ * new packet type)
+ */
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 25
+#define DRIVER_PATCHLEVEL 0
+
+/*
+ * Radeon chip families
+ */
+enum radeon_family {
+ CHIP_R100,
+ CHIP_RV100,
+ CHIP_RS100,
+ CHIP_RV200,
+ CHIP_RS200,
+ CHIP_R200,
+ CHIP_RV250,
+ CHIP_RS300,
+ CHIP_RV280,
+ CHIP_R300,
+ CHIP_R350,
+ CHIP_RV350,
+ CHIP_RV380,
+ CHIP_R420,
+ CHIP_RV410,
+ CHIP_RS400,
+ CHIP_LAST,
+};
+
+enum radeon_cp_microcode_version {
+ UCODE_R100,
+ UCODE_R200,
+ UCODE_R300,
+};
+
+/*
+ * Chip flags
+ */
+#define RADEON_FAMILY_MASK 0x0000ffffUL
+#define RADEON_FLAGS_MASK 0xffff0000UL
+#define RADEON_IS_MOBILITY 0x00010000UL
+#define RADEON_IS_IGP 0x00020000UL
+#define RADEON_SINGLE_CRTC 0x00040000UL
+#define RADEON_IS_AGP 0x00080000UL
+#define RADEON_HAS_HIERZ 0x00100000UL
+#define RADEON_IS_PCIE 0x00200000UL
+#define RADEON_NEW_MEMMAP 0x00400000UL
+#define RADEON_IS_PCI 0x00800000UL
+
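+/*
+ * When register writeback works, the CP keeps a copy of its read
+ * pointer in host memory (the ring_rptr map), so GET_RING_HEAD can
+ * avoid an expensive MMIO register read.
+ */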
+#define GET_RING_HEAD(dev_priv) \
+ (dev_priv->writeback_works ? \
+ DRM_READ32((dev_priv)->ring_rptr, 0) : \
+ RADEON_READ(RADEON_CP_RB_RPTR))
+
+#define SET_RING_HEAD(dev_priv, val) \
+ DRM_WRITE32((dev_priv)->ring_rptr, 0, (val))
+
+typedef struct drm_radeon_freelist {
+ unsigned int age;
+ drm_buf_t *buf;
+ struct drm_radeon_freelist *next;
+ struct drm_radeon_freelist *prev;
+} drm_radeon_freelist_t;
+
+typedef struct drm_radeon_ring_buffer {
+ u32 *start;
+ u32 *end;
+ int size;
+ int size_l2qw;
+
+ u32 tail;
+ u32 tail_mask;
+ int space;
+
+ int high_mark;
+} drm_radeon_ring_buffer_t;
+
+typedef struct drm_radeon_depth_clear_t {
+ u32 rb3d_cntl;
+ u32 rb3d_zstencilcntl;
+ u32 se_cntl;
+} drm_radeon_depth_clear_t;
+
+struct drm_radeon_driver_file_fields {
+ int64_t radeon_fb_delta;
+};
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ int start;
+ int size;
+ drm_file_t *filp; /* 0: free, -1: heap, other: real files */
+};
+
+struct radeon_surface {
+ int refcount;
+ u32 lower;
+ u32 upper;
+ u32 flags;
+};
+
+struct radeon_virt_surface {
+ int surface_index;
+ u32 lower;
+ u32 upper;
+ u32 flags;
+ drm_file_t *filp;
+};
+
+typedef struct drm_radeon_private {
+
+ drm_radeon_ring_buffer_t ring;
+ drm_radeon_sarea_t *sarea_priv;
+
+ u32 fb_location;
+ u32 fb_size;
+ int new_memmap;
+
+ int gart_size;
+ u32 gart_vm_start;
+ unsigned long gart_buffers_offset;
+
+ int cp_mode;
+ int cp_running;
+
+ drm_radeon_freelist_t *head;
+ drm_radeon_freelist_t *tail;
+ int last_buf;
+ volatile u32 *scratch;
+ int writeback_works;
+
+ int usec_timeout;
+
+ int microcode_version;
+
+ struct {
+ u32 boxes;
+ int freelist_timeouts;
+ int freelist_loops;
+ int requested_bufs;
+ int last_frame_reads;
+ int last_clear_reads;
+ int clears;
+ int texture_uploads;
+ } stats;
+
+ int do_boxes;
+ int page_flipping;
+ int current_page;
+
+ u32 color_fmt;
+ unsigned int front_offset;
+ unsigned int front_pitch;
+ unsigned int back_offset;
+ unsigned int back_pitch;
+
+ u32 depth_fmt;
+ unsigned int depth_offset;
+ unsigned int depth_pitch;
+
+ u32 front_pitch_offset;
+ u32 back_pitch_offset;
+ u32 depth_pitch_offset;
+
+ drm_radeon_depth_clear_t depth_clear;
+
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long gart_textures_offset;
+
+ drm_local_map_t *sarea;
+ drm_local_map_t *mmio;
+ drm_local_map_t *cp_ring;
+ drm_local_map_t *ring_rptr;
+ drm_local_map_t *gart_textures;
+
+ struct mem_block *gart_heap;
+ struct mem_block *fb_heap;
+
+ /* SW interrupt */
+ wait_queue_head_t swi_queue;
+ atomic_t swi_emitted;
+ int vblank_crtc;
+ uint32_t irq_enable_reg;
+ int irq_enabled;
+
+
+ struct radeon_surface surfaces[RADEON_MAX_SURFACES];
+ struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES];
+
+ unsigned long pcigart_offset;
+ drm_ati_pcigart_info gart_info;
+
+ u32 scratch_ages[5];
+
+	/* starting from here on, data is preserved across an open */
+ uint32_t flags; /* see radeon_chip_flags */
+
+} drm_radeon_private_t;
+
+typedef struct drm_radeon_buf_priv {
+ u32 age;
+} drm_radeon_buf_priv_t;
+
+typedef struct drm_radeon_kcmd_buffer {
+ int bufsz;
+ char *buf;
+ int nbox;
+ drm_clip_rect_t __user *boxes;
+} drm_radeon_kcmd_buffer_t;
+
+extern int radeon_no_wb;
+extern drm_ioctl_desc_t radeon_ioctls[];
+extern int radeon_max_ioctl;
+
+
+/*
+ * Check whether the given hardware address is inside the framebuffer or the
+ * GART area.
+ */
+#define RADEON_CHECK_OFFSET(dev_priv, off) \
+ (((off >= dev_priv->fb_location) && \
+ (off <= (dev_priv->fb_location + dev_priv->fb_size - 1))) || \
+ ((off >= dev_priv->gart_vm_start) && \
+ (off <= (dev_priv->gart_vm_start + dev_priv->gart_size - 1))))
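+
+/*
+ * Illustrative use (hypothetical caller; the command-stream checks
+ * in radeon_state.c follow this pattern):
+ *
+ *	if (!RADEON_CHECK_OFFSET(dev_priv, offset))
+ *		return (EINVAL);
+ */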
+
+ /* radeon_cp.c */
+extern int radeon_cp_init(DRM_IOCTL_ARGS);
+extern int radeon_cp_start(DRM_IOCTL_ARGS);
+extern int radeon_cp_stop(DRM_IOCTL_ARGS);
+extern int radeon_cp_reset(DRM_IOCTL_ARGS);
+extern int radeon_cp_idle(DRM_IOCTL_ARGS);
+extern int radeon_cp_resume(DRM_IOCTL_ARGS);
+extern int radeon_engine_reset(DRM_IOCTL_ARGS);
+extern int radeon_fullscreen(DRM_IOCTL_ARGS);
+extern int radeon_cp_buffers(DRM_IOCTL_ARGS);
+
+extern void radeon_freelist_reset(drm_device_t *dev);
+extern drm_buf_t *radeon_freelist_get(drm_device_t *dev);
+
+extern int radeon_wait_ring(drm_radeon_private_t *dev_priv, int n);
+
+extern int radeon_do_cp_idle(drm_radeon_private_t *dev_priv);
+
+extern int radeon_mem_alloc(DRM_IOCTL_ARGS);
+extern int radeon_mem_free(DRM_IOCTL_ARGS);
+extern int radeon_mem_init_heap(DRM_IOCTL_ARGS);
+extern void radeon_mem_takedown(struct mem_block **heap);
+extern void radeon_mem_release(drm_file_t *filp, struct mem_block *heap);
+
+ /* radeon_irq.c */
+extern int radeon_irq_emit(DRM_IOCTL_ARGS);
+extern int radeon_irq_wait(DRM_IOCTL_ARGS);
+
+extern void radeon_do_release(drm_device_t *dev);
+extern int radeon_driver_vblank_wait(drm_device_t *dev,
+ unsigned int *sequence);
+extern int radeon_driver_vblank_wait2(drm_device_t *dev,
+ unsigned int *sequence);
+extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern int radeon_driver_irq_preinstall(drm_device_t *dev);
+extern void radeon_driver_irq_postinstall(drm_device_t *dev);
+extern void radeon_driver_irq_uninstall(drm_device_t *dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
+
+extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
+extern int radeon_driver_unload(struct drm_device *dev);
+extern int radeon_driver_firstopen(struct drm_device *dev);
+extern void radeon_driver_preclose(drm_device_t *dev, drm_file_t *filp);
+extern void radeon_driver_postclose(drm_device_t *dev, drm_file_t *filp);
+extern void radeon_driver_lastclose(drm_device_t *dev);
+extern int radeon_driver_open(drm_device_t *dev, drm_file_t *filp_priv);
+extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
+/* r300_cmdbuf.c */
+extern void r300_init_reg_flags(void);
+
+extern int r300_do_cp_cmdbuf(drm_device_t *dev,
+ drm_file_t *fpriv, drm_radeon_kcmd_buffer_t *cmdbuf);
+
+/* Flags for stats.boxes */
+#define RADEON_BOX_DMA_IDLE 0x1
+#define RADEON_BOX_RING_FULL 0x2
+#define RADEON_BOX_FLIP 0x4
+#define RADEON_BOX_WAIT_IDLE 0x8
+#define RADEON_BOX_TEXTURE_LOAD 0x10
+
+/*
+ * Register definitions, register access macros and drmAddMap constants
+ * for Radeon kernel driver.
+ */
+#define RADEON_AGP_COMMAND 0x0f60
+#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
+#define RADEON_AGP_ENABLE (1<<8)
+#define RADEON_AUX_SCISSOR_CNTL 0x26f0
+#define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
+#define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
+#define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26)
+#define RADEON_SCISSOR_0_ENABLE (1 << 28)
+#define RADEON_SCISSOR_1_ENABLE (1 << 29)
+#define RADEON_SCISSOR_2_ENABLE (1 << 30)
+
+#define RADEON_BUS_CNTL 0x0030
+#define RADEON_BUS_MASTER_DIS (1 << 6)
+
+#define RADEON_CLOCK_CNTL_DATA 0x000c
+#define RADEON_PLL_WR_EN (1 << 7)
+#define RADEON_CLOCK_CNTL_INDEX 0x0008
+#define RADEON_CONFIG_APER_SIZE 0x0108
+#define RADEON_CONFIG_MEMSIZE 0x00f8
+#define RADEON_CRTC_OFFSET 0x0224
+#define RADEON_CRTC_OFFSET_CNTL 0x0228
+#define RADEON_CRTC_TILE_EN (1 << 15)
+#define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
+#define RADEON_CRTC2_OFFSET 0x0324
+#define RADEON_CRTC2_OFFSET_CNTL 0x0328
+
+#define RADEON_PCIE_INDEX 0x0030
+#define RADEON_PCIE_DATA 0x0034
+#define RADEON_PCIE_TX_GART_CNTL 0x10
+#define RADEON_PCIE_TX_GART_EN (1 << 0)
+#define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)
+#define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1)
+#define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1)
+#define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0<<3)
+#define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1<<3)
+#define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1<<5)
+#define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE 0x13
+#define RADEON_PCIE_TX_GART_START_LO 0x14
+#define RADEON_PCIE_TX_GART_START_HI 0x15
+#define RADEON_PCIE_TX_GART_END_LO 0x16
+#define RADEON_PCIE_TX_GART_END_HI 0x17
+
+#define RADEON_MPP_TB_CONFIG 0x01c0
+#define RADEON_MEM_CNTL 0x0140
+#define RADEON_MEM_SDRAM_MODE_REG 0x0158
+#define RADEON_AGP_BASE 0x0170
+
+#define RADEON_RB3D_COLOROFFSET 0x1c40
+#define RADEON_RB3D_COLORPITCH 0x1c48
+
+#define RADEON_SRC_X_Y 0x1590
+
+#define RADEON_DP_GUI_MASTER_CNTL 0x146c
+#define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
+#define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
+#define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
+#define RADEON_GMC_BRUSH_NONE (15 << 4)
+#define RADEON_GMC_DST_16BPP (4 << 8)
+#define RADEON_GMC_DST_24BPP (5 << 8)
+#define RADEON_GMC_DST_32BPP (6 << 8)
+#define RADEON_GMC_DST_DATATYPE_SHIFT 8
+#define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
+#define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
+#define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
+#define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
+#define RADEON_GMC_WR_MSK_DIS (1 << 30)
+#define RADEON_ROP3_S 0x00cc0000
+#define RADEON_ROP3_P 0x00f00000
+#define RADEON_DP_WRITE_MASK 0x16cc
+#define RADEON_SRC_PITCH_OFFSET 0x1428
+#define RADEON_DST_PITCH_OFFSET 0x142c
+#define RADEON_DST_PITCH_OFFSET_C 0x1c80
+#define RADEON_DST_TILE_LINEAR (0 << 30)
+#define RADEON_DST_TILE_MACRO (1 << 30)
+#define RADEON_DST_TILE_MICRO ((uint_t)2 << 30)
+#define RADEON_DST_TILE_BOTH ((uint_t)3 << 30)
+
+#define RADEON_SCRATCH_REG0 0x15e0
+#define RADEON_SCRATCH_REG1 0x15e4
+#define RADEON_SCRATCH_REG2 0x15e8
+#define RADEON_SCRATCH_REG3 0x15ec
+#define RADEON_SCRATCH_REG4 0x15f0
+#define RADEON_SCRATCH_REG5 0x15f4
+#define RADEON_SCRATCH_UMSK 0x0770
+#define RADEON_SCRATCH_ADDR 0x0774
+
+#define RADEON_SCRATCHOFF(x) (RADEON_SCRATCH_REG_OFFSET + 4*(x))
+
+#define GET_SCRATCH(x) (dev_priv->writeback_works ? \
+ DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(x)) : \
+ RADEON_READ(RADEON_SCRATCH_REG0 + 4*(x)))
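+
+/*
+ * Like the ring head above, scratch-register contents are written
+ * back into the ring_rptr map by the CP when writeback works, so
+ * GET_SCRATCH can avoid an MMIO register read.
+ */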
+
+#define RADEON_GEN_INT_CNTL 0x0040
+#define RADEON_CRTC_VBLANK_MASK (1 << 0)
+#define RADEON_CRTC2_VBLANK_MASK (1 << 9)
+#define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
+#define RADEON_SW_INT_ENABLE (1 << 25)
+
+#define RADEON_GEN_INT_STATUS 0x0044
+#define RADEON_CRTC_VBLANK_STAT (1 << 0)
+#define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+#define RADEON_CRTC2_VBLANK_STAT (1 << 9)
+#define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
+#define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
+#define RADEON_SW_INT_TEST (1 << 25)
+#define RADEON_SW_INT_TEST_ACK (1 << 25)
+#define RADEON_SW_INT_FIRE (1 << 26)
+
+#define RADEON_HOST_PATH_CNTL 0x0130
+#define RADEON_HDP_SOFT_RESET (1 << 26)
+#define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28)
+#define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28)
+
+#define RADEON_ISYNC_CNTL 0x1724
+#define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
+#define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
+#define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
+#define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
+#define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
+#define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
+
+#define RADEON_RBBM_GUICNTL 0x172c
+#define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
+#define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
+#define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
+#define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
+
+#define RADEON_MC_AGP_LOCATION 0x014c
+#define RADEON_MC_FB_LOCATION 0x0148
+#define RADEON_MCLK_CNTL 0x0012
+#define RADEON_FORCEON_MCLKA (1 << 16)
+#define RADEON_FORCEON_MCLKB (1 << 17)
+#define RADEON_FORCEON_YCLKA (1 << 18)
+#define RADEON_FORCEON_YCLKB (1 << 19)
+#define RADEON_FORCEON_MC (1 << 20)
+#define RADEON_FORCEON_AIC (1 << 21)
+
+#define RADEON_PP_BORDER_COLOR_0 0x1d40
+#define RADEON_PP_BORDER_COLOR_1 0x1d44
+#define RADEON_PP_BORDER_COLOR_2 0x1d48
+#define RADEON_PP_CNTL 0x1c38
+#define RADEON_SCISSOR_ENABLE (1 << 1)
+#define RADEON_PP_LUM_MATRIX 0x1d00
+#define RADEON_PP_MISC 0x1c14
+#define RADEON_PP_ROT_MATRIX_0 0x1d58
+#define RADEON_PP_TXFILTER_0 0x1c54
+#define RADEON_PP_TXOFFSET_0 0x1c5c
+#define RADEON_PP_TXFILTER_1 0x1c6c
+#define RADEON_PP_TXFILTER_2 0x1c84
+
+#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
+#define RADEON_RB2D_DC_FLUSH (3 << 0)
+#define RADEON_RB2D_DC_FREE (3 << 2)
+#define RADEON_RB2D_DC_FLUSH_ALL 0xf
+#define RADEON_RB2D_DC_BUSY 0x80000000
+#define RADEON_RB3D_CNTL 0x1c3c
+#define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
+#define RADEON_PLANE_MASK_ENABLE (1 << 1)
+#define RADEON_DITHER_ENABLE (1 << 2)
+#define RADEON_ROUND_ENABLE (1 << 3)
+#define RADEON_SCALE_DITHER_ENABLE (1 << 4)
+#define RADEON_DITHER_INIT (1 << 5)
+#define RADEON_ROP_ENABLE (1 << 6)
+#define RADEON_STENCIL_ENABLE (1 << 7)
+#define RADEON_Z_ENABLE (1 << 8)
+#define RADEON_ZBLOCK16 (1 << 15)
+#define RADEON_RB3D_DEPTHOFFSET 0x1c24
+#define RADEON_RB3D_DEPTHCLEARVALUE 0x3230
+#define RADEON_RB3D_DEPTHPITCH 0x1c28
+#define RADEON_RB3D_PLANEMASK 0x1d84
+#define RADEON_RB3D_STENCILREFMASK 0x1d7c
+#define RADEON_RB3D_ZCACHE_MODE 0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254
+#define RADEON_RB3D_ZC_FLUSH (1 << 0)
+#define RADEON_RB3D_ZC_FREE (1 << 2)
+#define RADEON_RB3D_ZC_FLUSH_ALL 0x5
+#define RADEON_RB3D_ZC_BUSY 0x80000000UL
+#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c
+#define RADEON_RB3D_DC_FLUSH (3 << 0)
+#define RADEON_RB3D_DC_FREE (3 << 2)
+#define RADEON_RB3D_DC_FLUSH_ALL 0xf
+#define RADEON_RB3D_DC_BUSY 0x80000000UL
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+#define RADEON_Z_TEST_MASK (7 << 4)
+#define RADEON_Z_TEST_ALWAYS (7 << 4)
+#define RADEON_Z_HIERARCHY_ENABLE (1 << 8)
+#define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
+#define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16)
+#define RADEON_STENCIL_ZPASS_REPLACE (2 << 20)
+#define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24)
+#define RADEON_Z_COMPRESSION_ENABLE (1 << 28)
+#define RADEON_FORCE_Z_DIRTY (1 << 29)
+#define RADEON_Z_WRITE_ENABLE (1 << 30)
+#define RADEON_Z_DECOMPRESSION_ENABLE 0x80000000UL
+#define RADEON_RBBM_SOFT_RESET 0x00f0
+#define RADEON_SOFT_RESET_CP (1 << 0)
+#define RADEON_SOFT_RESET_HI (1 << 1)
+#define RADEON_SOFT_RESET_SE (1 << 2)
+#define RADEON_SOFT_RESET_RE (1 << 3)
+#define RADEON_SOFT_RESET_PP (1 << 4)
+#define RADEON_SOFT_RESET_E2 (1 << 5)
+#define RADEON_SOFT_RESET_RB (1 << 6)
+#define RADEON_SOFT_RESET_HDP (1 << 7)
+#define RADEON_RBBM_STATUS 0x0e40
+#define RADEON_RBBM_FIFOCNT_MASK 0x007f
+#define	RADEON_RBBM_ACTIVE		0x80000000UL
+#define RADEON_RE_LINE_PATTERN 0x1cd0
+#define RADEON_RE_MISC 0x26c4
+#define RADEON_RE_TOP_LEFT 0x26c0
+#define RADEON_RE_WIDTH_HEIGHT 0x1c44
+#define RADEON_RE_STIPPLE_ADDR 0x1cc8
+#define RADEON_RE_STIPPLE_DATA 0x1ccc
+
+#define RADEON_SCISSOR_TL_0 0x1cd8
+#define RADEON_SCISSOR_BR_0 0x1cdc
+#define RADEON_SCISSOR_TL_1 0x1ce0
+#define RADEON_SCISSOR_BR_1 0x1ce4
+#define RADEON_SCISSOR_TL_2 0x1ce8
+#define RADEON_SCISSOR_BR_2 0x1cec
+#define RADEON_SE_COORD_FMT 0x1c50
+#define RADEON_SE_CNTL 0x1c4c
+#define RADEON_FFACE_CULL_CW (0 << 0)
+#define RADEON_BFACE_SOLID (3 << 1)
+#define RADEON_FFACE_SOLID (3 << 3)
+#define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
+#define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
+#define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
+#define RADEON_ALPHA_SHADE_FLAT (1 << 10)
+#define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
+#define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
+#define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+#define RADEON_FOG_SHADE_FLAT (1 << 14)
+#define RADEON_FOG_SHADE_GOURAUD (2 << 14)
+#define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
+#define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
+#define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
+#define RADEON_ROUND_MODE_TRUNC (0 << 28)
+#define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
+#define RADEON_SE_CNTL_STATUS 0x2140
+#define RADEON_SE_LINE_WIDTH 0x1db8
+#define RADEON_SE_VPORT_XSCALE 0x1d98
+#define RADEON_SE_ZBIAS_FACTOR 0x1db0
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254
+#define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200
+#define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16
+#define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28
+#define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204
+#define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208
+#define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16
+#define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C
+#define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8
+#define RADEON_SURFACE_ACCESS_CLR 0x0bfc
+#define RADEON_SURFACE_CNTL 0x0b00
+#define RADEON_SURF_TRANSLATION_DIS (1 << 8)
+#define RADEON_NONSURF_AP0_SWP_MASK (3 << 20)
+#define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20)
+#define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20)
+#define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20)
+#define RADEON_NONSURF_AP1_SWP_MASK (3 << 22)
+#define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22)
+#define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22)
+#define RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22)
+#define RADEON_SURFACE0_INFO 0x0b0c
+#define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0)
+#define RADEON_SURF_TILE_MODE_MASK (3 << 16)
+#define RADEON_SURF_TILE_MODE_MACRO (0 << 16)
+#define RADEON_SURF_TILE_MODE_MICRO (1 << 16)
+#define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16)
+#define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16)
+#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
+#define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0)
+#define RADEON_SURFACE1_INFO 0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
+#define RADEON_SURFACE2_INFO 0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
+#define RADEON_SURFACE3_INFO 0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
+#define RADEON_SURFACE4_INFO 0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
+#define RADEON_SURFACE5_INFO 0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
+#define RADEON_SURFACE6_INFO 0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
+#define RADEON_SURFACE7_INFO 0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
+#define RADEON_SW_SEMAPHORE 0x013c
+
+#define RADEON_WAIT_UNTIL 0x1720
+#define RADEON_WAIT_CRTC_PFLIP (1 << 0)
+#define RADEON_WAIT_2D_IDLE (1 << 14)
+#define RADEON_WAIT_3D_IDLE (1 << 15)
+#define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
+#define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
+#define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
+
+#define RADEON_RB3D_ZMASKOFFSET 0x3234
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+#define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
+#define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
+
+/* CP registers */
+#define RADEON_CP_ME_RAM_ADDR 0x07d4
+#define RADEON_CP_ME_RAM_RADDR 0x07d8
+#define RADEON_CP_ME_RAM_DATAH 0x07dc
+#define RADEON_CP_ME_RAM_DATAL 0x07e0
+
+#define RADEON_CP_RB_BASE 0x0700
+#define RADEON_CP_RB_CNTL 0x0704
+#define RADEON_BUF_SWAP_32BIT (2 << 16)
+#define RADEON_RB_NO_UPDATE (1 << 27)
+
+#define RADEON_CP_RB_RPTR_ADDR 0x070c
+#define RADEON_CP_RB_RPTR 0x0710
+#define RADEON_CP_RB_WPTR 0x0714
+
+#define RADEON_CP_RB_WPTR_DELAY 0x0718
+#define RADEON_PRE_WRITE_TIMER_SHIFT 0
+#define RADEON_PRE_WRITE_LIMIT_SHIFT 23
+
+#define RADEON_CP_IB_BASE 0x0738
+
+#define RADEON_CP_CSQ_CNTL 0x0740
+#define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
+#define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
+#define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
+#define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
+#define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
+#define RADEON_CSQ_PRIBM_INDBM (4 << 28)
+#define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
+
+#define RADEON_AIC_CNTL 0x01d0
+#define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
+#define RADEON_AIC_STAT 0x01d4
+#define RADEON_AIC_PT_BASE 0x01d8
+#define RADEON_AIC_LO_ADDR 0x01dc
+#define RADEON_AIC_HI_ADDR 0x01e0
+#define RADEON_AIC_TLB_ADDR 0x01e4
+#define RADEON_AIC_TLB_DATA 0x01e8
+
+/* CP command packets */
+#define RADEON_CP_PACKET0 0x00000000
+#define RADEON_ONE_REG_WR (1 << 15)
+#define RADEON_CP_PACKET1 0x40000000
+#define RADEON_CP_PACKET2 0x80000000
+#define RADEON_CP_PACKET3 0xC0000000
+#define RADEON_CP_NOP 0x00001000
+#define RADEON_CP_NEXT_CHAR 0x00001900
+#define RADEON_CP_PLY_NEXTSCAN 0x00001D00
+#define RADEON_CP_SET_SCISSORS 0x00001E00
+
+/* GEN_INDX_PRIM is unsupported starting with R300 */
+#define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
+#define RADEON_WAIT_FOR_IDLE 0x00002600
+#define RADEON_3D_DRAW_VBUF 0x00002800
+#define RADEON_3D_DRAW_IMMD 0x00002900
+#define RADEON_3D_DRAW_INDX 0x00002A00
+#define RADEON_CP_LOAD_PALETTE 0x00002C00
+#define RADEON_3D_LOAD_VBPNTR 0x00002F00
+#define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000
+#define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100
+#define RADEON_3D_CLEAR_ZMASK 0x00003200
+#define RADEON_CP_INDX_BUFFER 0x00003300
+#define RADEON_CP_3D_DRAW_VBUF_2 0x00003400
+#define RADEON_CP_3D_DRAW_IMMD_2 0x00003500
+#define RADEON_CP_3D_DRAW_INDX_2 0x00003600
+#define RADEON_3D_CLEAR_HIZ 0x00003700
+#define RADEON_CP_3D_CLEAR_CMASK 0x00003802
+#define RADEON_CNTL_HOSTDATA_BLT 0x00009400
+#define RADEON_CNTL_PAINT_MULTI 0x00009A00
+#define RADEON_CNTL_BITBLT_MULTI 0x00009B00
+#define RADEON_CNTL_SET_SCISSORS 0xC0001E00
+
+#define RADEON_CP_PACKET_MASK 0xC0000000
+#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
+#define RADEON_CP_PACKET0_REG_MASK 0x000007ff
+#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
+#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
+
+#define RADEON_VTX_Z_PRESENT 0x80000000
+#define RADEON_VTX_PKCOLOR_PRESENT (1 << 3)
+
+#define RADEON_PRIM_TYPE_NONE (0 << 0)
+#define RADEON_PRIM_TYPE_POINT (1 << 0)
+#define RADEON_PRIM_TYPE_LINE (2 << 0)
+#define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0)
+#define RADEON_PRIM_TYPE_TRI_LIST (4 << 0)
+#define RADEON_PRIM_TYPE_TRI_FAN (5 << 0)
+#define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0)
+#define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0)
+#define RADEON_PRIM_TYPE_RECT_LIST (8 << 0)
+#define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
+#define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
+#define RADEON_PRIM_TYPE_MASK 0xf
+#define RADEON_PRIM_WALK_IND (1 << 4)
+#define RADEON_PRIM_WALK_LIST (2 << 4)
+#define RADEON_PRIM_WALK_RING (3 << 4)
+#define RADEON_COLOR_ORDER_BGRA (0 << 6)
+#define RADEON_COLOR_ORDER_RGBA (1 << 6)
+#define RADEON_MAOS_ENABLE (1 << 7)
+#define RADEON_VTX_FMT_R128_MODE (0 << 8)
+#define RADEON_VTX_FMT_RADEON_MODE (1 << 8)
+#define RADEON_NUM_VERTICES_SHIFT 16
+
+#define RADEON_COLOR_FORMAT_CI8 2
+#define RADEON_COLOR_FORMAT_ARGB1555 3
+#define RADEON_COLOR_FORMAT_RGB565 4
+#define RADEON_COLOR_FORMAT_ARGB8888 6
+#define RADEON_COLOR_FORMAT_RGB332 7
+#define RADEON_COLOR_FORMAT_RGB8 9
+#define RADEON_COLOR_FORMAT_ARGB4444 15
+
+#define RADEON_TXFORMAT_I8 0
+#define RADEON_TXFORMAT_AI88 1
+#define RADEON_TXFORMAT_RGB332 2
+#define RADEON_TXFORMAT_ARGB1555 3
+#define RADEON_TXFORMAT_RGB565 4
+#define RADEON_TXFORMAT_ARGB4444 5
+#define RADEON_TXFORMAT_ARGB8888 6
+#define RADEON_TXFORMAT_RGBA8888 7
+#define RADEON_TXFORMAT_Y8 8
+#define RADEON_TXFORMAT_VYUY422 10
+#define RADEON_TXFORMAT_YVYU422 11
+#define RADEON_TXFORMAT_DXT1 12
+#define RADEON_TXFORMAT_DXT23 14
+#define RADEON_TXFORMAT_DXT45 15
+
+#define R200_PP_TXCBLEND_0 0x2f00
+#define R200_PP_TXCBLEND_1 0x2f10
+#define R200_PP_TXCBLEND_2 0x2f20
+#define R200_PP_TXCBLEND_3 0x2f30
+#define R200_PP_TXCBLEND_4 0x2f40
+#define R200_PP_TXCBLEND_5 0x2f50
+#define R200_PP_TXCBLEND_6 0x2f60
+#define R200_PP_TXCBLEND_7 0x2f70
+#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268
+#define R200_PP_TFACTOR_0 0x2ee0
+#define R200_SE_VTX_FMT_0 0x2088
+#define R200_SE_VAP_CNTL 0x2080
+#define R200_SE_TCL_MATRIX_SEL_0 0x2230
+#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8
+#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0
+#define R200_PP_TXFILTER_5 0x2ca0
+#define R200_PP_TXFILTER_4 0x2c80
+#define R200_PP_TXFILTER_3 0x2c60
+#define R200_PP_TXFILTER_2 0x2c40
+#define R200_PP_TXFILTER_1 0x2c20
+#define R200_PP_TXFILTER_0 0x2c00
+#define R200_PP_TXOFFSET_5 0x2d78
+#define R200_PP_TXOFFSET_4 0x2d60
+#define R200_PP_TXOFFSET_3 0x2d48
+#define R200_PP_TXOFFSET_2 0x2d30
+#define R200_PP_TXOFFSET_1 0x2d18
+#define R200_PP_TXOFFSET_0 0x2d00
+
+#define R200_PP_CUBIC_FACES_0 0x2c18
+#define R200_PP_CUBIC_FACES_1 0x2c38
+#define R200_PP_CUBIC_FACES_2 0x2c58
+#define R200_PP_CUBIC_FACES_3 0x2c78
+#define R200_PP_CUBIC_FACES_4 0x2c98
+#define R200_PP_CUBIC_FACES_5 0x2cb8
+#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14
+#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c
+#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44
+#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c
+#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74
+#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c
+
+#define R200_RE_AUX_SCISSOR_CNTL 0x26f0
+#define R200_SE_VTE_CNTL 0x20b0
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250
+#define R200_PP_TAM_DEBUG3 0x2d9c
+#define R200_PP_CNTL_X 0x2cc4
+#define R200_SE_VAP_CNTL_STATUS 0x2140
+#define R200_RE_SCISSOR_TL_0 0x1cd8
+#define R200_RE_SCISSOR_TL_1 0x1ce0
+#define R200_RE_SCISSOR_TL_2 0x1ce8
+#define R200_RB3D_DEPTHXY_OFFSET 0x1d60
+#define R200_RE_AUX_SCISSOR_CNTL 0x26f0
+#define R200_SE_VTX_STATE_CNTL 0x2180
+#define R200_RE_POINTSIZE 0x2648
+#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
+
+#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
+#define RADEON_PP_TEX_SIZE_1 0x1d0c
+#define RADEON_PP_TEX_SIZE_2 0x1d14
+
+#define RADEON_PP_CUBIC_FACES_0 0x1d24
+#define RADEON_PP_CUBIC_FACES_1 0x1d28
+#define RADEON_PP_CUBIC_FACES_2 0x1d2c
+#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
+
+#define RADEON_SE_TCL_STATE_FLUSH 0x2284
+
+#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001
+#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000
+#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012
+#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100
+#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200
+#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001
+#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002
+#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b
+#define R200_3D_DRAW_IMMD_2 0xC0003500
+#define R200_SE_VTX_FMT_1 0x208c
+#define R200_RE_CNTL 0x1c50
+
+#define R200_RB3D_BLENDCOLOR 0x3218
+
+#define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4
+
+#define R200_PP_TRI_PERF 0x2cf8
+
+#define R200_PP_AFS_0 0x2f80
+#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */
+
+#define R200_VAP_PVS_CNTL_1 0x22D0
+
+/* MPEG settings from VHA code */
+#define RADEON_VHA_SETTO16_1 0x2694
+#define RADEON_VHA_SETTO16_2 0x2680
+#define RADEON_VHA_SETTO0_1 0x1840
+#define RADEON_VHA_FB_OFFSET 0x19e4
+#define RADEON_VHA_SETTO1AND70S 0x19d8
+#define RADEON_VHA_DST_PITCH 0x1408
+
+/* set as reference header */
+#define RADEON_VHA_BACKFRAME0_OFF_Y 0x1840
+#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y 0x1844
+#define RADEON_VHA_BACKFRAME0_OFF_U 0x1848
+#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U 0x184c
+#define	RADEON_VHA_BACKFRAME0_OFF_V		0x1850
+#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V 0x1854
+#define RADEON_VHA_FORWFRAME0_OFF_Y 0x1858
+#define RADEON_VHA_FORWFRAME1_OFF_PITCH_Y 0x185c
+#define RADEON_VHA_FORWFRAME0_OFF_U 0x1860
+#define RADEON_VHA_FORWFRAME1_OFF_PITCH_U 0x1864
+#define RADEON_VHA_FORWFRAME0_OFF_V 0x1868
+#define RADEON_VHA_FORWFRAME0_OFF_PITCH_V 0x1880
+#define RADEON_VHA_BACKFRAME0_OFF_Y_2 0x1884
+#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y_2 0x1888
+#define RADEON_VHA_BACKFRAME0_OFF_U_2 0x188c
+#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U_2 0x1890
+#define RADEON_VHA_BACKFRAME0_OFF_V_2 0x1894
+#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V_2 0x1898
+
+
+
+/* Constants */
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+
+#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0
+#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1
+#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2
+#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3
+#define RADEON_LAST_DISPATCH 1
+
+#define RADEON_MAX_VB_AGE 0x7fffffff
+#define RADEON_MAX_VB_VERTS (0xffff)
+
+#define RADEON_RING_HIGH_MARK 128
+
+#define RADEON_PCIGART_TABLE_SIZE (32*1024)
+
+#define RADEON_READ(reg) \
+ DRM_READ32(dev_priv->mmio, (reg))
+#define RADEON_WRITE(reg, val) \
+ DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#define RADEON_READ8(reg) \
+ DRM_READ8(dev_priv->mmio, (reg))
+#define RADEON_WRITE8(reg, val) \
+ DRM_WRITE8(dev_priv->mmio, (reg), (val))
+
+#define RADEON_WRITE_PLL(addr, val) \
+do { \
+ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \
+ ((addr) & 0x1f) | RADEON_PLL_WR_EN); \
+ RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \
+} while (*"\0")
+
+#define RADEON_WRITE_PCIE(addr, val) \
+do { \
+ RADEON_WRITE8(RADEON_PCIE_INDEX, \
+ ((addr) & 0xff)); \
+ RADEON_WRITE(RADEON_PCIE_DATA, (val)); \
+} while (*"\0")
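+
+/*
+ * Both write macros above use the indirect index/data idiom: the
+ * target register number is written to an index register and the
+ * value to its paired data register, since the PLL and PCIE register
+ * files are not directly mapped in MMIO space.
+ */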
+
+#define CP_PACKET0(reg, n) \
+ (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET0_TABLE(reg, n) \
+ (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET1(reg0, reg1) \
+ (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
+#define CP_PACKET2() \
+ (RADEON_CP_PACKET2)
+#define CP_PACKET3(pkt, n) \
+ (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
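+
+/*
+ * A type-0 packet header announces (n + 1) data DWORDs to be written
+ * starting at register 'reg'; e.g. CP_PACKET0(RADEON_WAIT_UNTIL, 0)
+ * is followed by exactly one data DWORD (see the helper macros
+ * below).  With RADEON_ONE_REG_WR, all DWORDs go to the same
+ * register.
+ */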
+
+/*
+ * Engine control helper macros
+ */
+
+#define RADEON_WAIT_UNTIL_2D_IDLE() do { \
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); \
+ OUT_RING((RADEON_WAIT_2D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN)); \
+} while (*"\0")
+
+#define RADEON_WAIT_UNTIL_3D_IDLE() do { \
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); \
+ OUT_RING((RADEON_WAIT_3D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN)); \
+} while (*"\0")
+
+#define RADEON_WAIT_UNTIL_IDLE() do { \
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); \
+ OUT_RING((RADEON_WAIT_2D_IDLECLEAN | \
+ RADEON_WAIT_3D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN)); \
+} while (*"\0")
+
+#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do { \
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); \
+ OUT_RING(RADEON_WAIT_CRTC_PFLIP); \
+} while (*"\0")
+
+#define RADEON_FLUSH_CACHE() do { \
+ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
+ OUT_RING(RADEON_RB3D_DC_FLUSH); \
+} while (*"\0")
+
+#define RADEON_PURGE_CACHE() do { \
+ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
+ OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \
+} while (*"\0")
+
+#define RADEON_FLUSH_ZCACHE() do { \
+ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
+ OUT_RING(RADEON_RB3D_ZC_FLUSH); \
+} while (*"\0")
+
+#define RADEON_PURGE_ZCACHE() do { \
+ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
+ OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL); \
+} while (*"\0")
+
+/*
+ * Misc helper macros
+ */
+
+/* Perfbox functionality only. */
+#define RING_SPACE_TEST_WITH_RETURN(dev_priv) \
+do { \
+ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \
+ u32 head = GET_RING_HEAD(dev_priv); \
+ if (head == dev_priv->ring.tail) \
+ dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \
+ } \
+} while (*"\0")
+
+#define VB_AGE_TEST_WITH_RETURN(dev_priv) \
+do { \
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \
+ if (sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE) { \
+ int __ret = radeon_do_cp_idle(dev_priv); \
+ if (__ret) \
+ return (__ret); \
+ sarea_priv->last_dispatch = 0; \
+ radeon_freelist_reset(dev); \
+ } \
+} while (*"\0")
+
+#define RADEON_DISPATCH_AGE(age) do { \
+ OUT_RING(CP_PACKET0(RADEON_LAST_DISPATCH_REG, 0)); \
+ OUT_RING(age); \
+} while (*"\0")
+
+#define RADEON_FRAME_AGE(age) do { \
+ OUT_RING(CP_PACKET0(RADEON_LAST_FRAME_REG, 0)); \
+ OUT_RING(age); \
+} while (*"\0")
+
+#define RADEON_CLEAR_AGE(age) do { \
+ OUT_RING(CP_PACKET0(RADEON_LAST_CLEAR_REG, 0)); \
+ OUT_RING(age); \
+} while (*"\0")
+
+/*
+ * Ring control
+ */
+#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring;
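+
+/*
+ * Typical command-submission sequence (see radeon_emit_irq() in
+ * radeon_irq.c for a complete example):
+ *
+ *	RING_LOCALS;
+ *
+ *	BEGIN_RING(4);
+ *	OUT_RING_REG(RADEON_LAST_SWI_REG, seq);
+ *	OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
+ *	ADVANCE_RING();
+ *	COMMIT_RING();
+ */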
+
+#define BEGIN_RING(n) do { \
+ if (dev_priv->ring.space <= (n) * sizeof (u32)) { \
+ COMMIT_RING(); \
+ (void) radeon_wait_ring(dev_priv, (n) * sizeof (u32)); \
+ } \
+ _nr = n; dev_priv->ring.space -= (n) * sizeof (u32); \
+ ring = dev_priv->ring.start; \
+ write = dev_priv->ring.tail; \
+ mask = dev_priv->ring.tail_mask; \
+} while (*"\0")
+
+#define ADVANCE_RING() do { \
+ if (((dev_priv->ring.tail + _nr) & mask) != write) { \
+ DRM_ERROR( \
+ "ADVANCE_RING(): mismatch: nr: " \
+ "%x write: %x line: %d\n", \
+ ((dev_priv->ring.tail + _nr) & mask), \
+ write, __LINE__); \
+ } else \
+ dev_priv->ring.tail = write; \
+} while (*"\0")
+
+
+#if defined(lint) || defined(__lint)
+#define COMMIT_RING() /* For lint clean */
+#else
+#define COMMIT_RING() do { \
+ /* Flush writes to ring */ \
+ DRM_MEMORYBARRIER(); \
+ GET_RING_HEAD(dev_priv); \
+ RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail); \
+ /* read from PCI bus to ensure correct posting */ \
+ RADEON_READ(RADEON_CP_RB_RPTR); \
+} while (*"\0")
+#endif
+
+#define OUT_RING(x) do { \
+ ring[write++] = (x); \
+ write &= mask; \
+} while (*"\0")
+
+#define OUT_RING_REG(reg, val) do { \
+ OUT_RING(CP_PACKET0(reg, 0)); \
+ OUT_RING(val); \
+} while (*"\0")
+
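+/*
+ * Copy a table of DWORDs into the ring, splitting the copy in two
+ * when it would run past the end of the ring buffer (the ring wraps
+ * at mask + 1 entries).
+ */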
+#define OUT_RING_TABLE(tab, sz) do { \
+ int _size = (sz); \
+ int *_tab = (int *)(uintptr_t)(tab); \
+ \
+ if (write + _size > mask) { \
+ int _i = (mask+1) - write; \
+ _size -= _i; \
+ while (_i > 0) { \
+ *(int *)(ring + write) = *_tab++; \
+ write++; \
+ _i--; \
+ } \
+ write = 0; \
+ _tab += _i; \
+ } \
+ while (_size > 0) { \
+ *(ring + write) = *_tab++; \
+ write++; \
+ _size--; \
+ } \
+ write &= mask; \
+} while (*"\0")
+
+#endif /* __RADEON_DRV_H__ */
diff --git a/usr/src/uts/intel/io/drm/radeon_io32.h b/usr/src/uts/intel/io/drm/radeon_io32.h
new file mode 100644
index 0000000..2febb5f
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/radeon_io32.h
@@ -0,0 +1,173 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifndef __RADEON_IO32_H__
+#define __RADEON_IO32_H__
+
+
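+/*
+ * ILP32 ioctl argument layouts, used when a 32-bit client runs on a
+ * 64-bit kernel.  Pointer-sized fields are carried as uint32_t and
+ * widened by the ioctl handlers after ddi_model_convert_from()
+ * identifies the caller's data model.
+ */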
+#ifdef _MULTI_DATAMODEL
+/*
+ * For radeon_cp_init()
+ */
+typedef struct drm_radeon_init_32 {
+ int func;
+ unsigned int sarea_priv_offset;
+ int is_pci; /* for overriding only */
+ int cp_mode;
+ int gart_size;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned int fb_offset DEPRECATED;
+ unsigned int mmio_offset DEPRECATED;
+ unsigned int ring_offset;
+ unsigned int ring_rptr_offset;
+ unsigned int buffers_offset;
+ unsigned int gart_textures_offset;
+} drm_radeon_init_32_t;
+
+/*
+ * radeon_cp_buffers()
+ */
+typedef struct drm_dma_32 {
+ int context;
+ int send_count;
+ uint32_t send_indices;
+ uint32_t send_sizes;
+ drm_dma_flags_t flags;
+ int request_count;
+ int request_size;
+ uint32_t request_indices;
+ uint32_t request_sizes;
+ int granted_count;
+} drm_dma_32_t;
+
+/*
+ * drm_radeon_clear()
+ */
+typedef struct drm_radeon_clear_32 {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask;
+ uint32_t depth_boxes;
+} drm_radeon_clear_32_t;
+
+/*
+ * For radeon_cp_texture()
+ */
+typedef struct drm_radeon_tex_image_32 {
+ unsigned int x, y;
+ unsigned int width, height;
+ uint32_t data;
+} drm_radeon_tex_image_32_t;
+
+typedef struct drm_radeon_texture_32 {
+ unsigned int offset;
+ int pitch;
+ int format;
+ int width;
+ int height;
+ uint32_t image;
+} drm_radeon_texture_32_t;
+
+/*
+ * for radeon_cp_stipple()
+ */
+typedef struct drm_radeon_stipple_32 {
+ uint32_t mask;
+} drm_radeon_stipple_32_t;
+
+/*
+ * radeon_cp_vertex2()
+ */
+typedef struct drm_radeon_vertex2_32 {
+ int idx;
+ int discard;
+ int nr_states;
+ uint32_t state;
+ int nr_prims;
+ uint32_t prim;
+} drm_radeon_vertex2_32_t;
+
+/*
+ * radeon_cp_cmdbuf()
+ */
+typedef struct drm_radeon_kcmd_buffer_32 {
+ int bufsz;
+ uint32_t buf;
+ int nbox;
+ uint32_t boxes;
+} drm_radeon_kcmd_buffer_32_t;
+
+/*
+ * radeon_cp_getparam()
+ */
+typedef struct drm_radeon_getparam_32 {
+ int param;
+ uint32_t value;
+} drm_radeon_getparam_32_t;
+
+
+/*
+ * radeon_mem_alloc()
+ */
+typedef struct drm_radeon_mem_alloc_32 {
+ int region;
+ int alignment;
+ int size;
+ uint32_t region_offset; /* offset from start of fb or GART */
+} drm_radeon_mem_alloc_32_t;
+
+
+/*
+ * radeon_irq_emit()
+ */
+typedef struct drm_radeon_irq_emit_32 {
+ uint32_t irq_seq;
+} drm_radeon_irq_emit_32_t;
+
+
+/*
+ * radeon_cp_setparam()
+ */
+#pragma pack(1)
+typedef struct drm_radeon_setparam_32 {
+ unsigned int param;
+ uint64_t value;
+} drm_radeon_setparam_32_t;
+#pragma pack()
+
+#endif /* _MULTI_DATAMODEL */
+#endif /* __RADEON_IO32_H__ */
diff --git a/usr/src/uts/intel/io/drm/radeon_irq.c b/usr/src/uts/intel/io/drm/radeon_irq.c
new file mode 100644
index 0000000..0c7e2e8
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/radeon_irq.c
@@ -0,0 +1,375 @@
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ *	Michel Dänzer <michel@daenzer.net>
+ */
+
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "radeon_io32.h"
+
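+/*
+ * The status bits are write-one-to-clear: writing the bits just read
+ * back to RADEON_GEN_INT_STATUS acknowledges exactly those
+ * interrupts (note the *_STAT and *_STAT_ACK defines share bit
+ * positions).
+ */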
+static inline u32
+radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 mask)
+{
+ uint32_t irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
+ if (irqs)
+ RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+ return (irqs);
+}
+
+/*
+ * Interrupts - Used for device synchronization and flushing in the
+ * following circumstances:
+ *
+ * - Exclusive FB access with hw idle:
+ * - Wait for GUI Idle (?) interrupt, then do normal flush.
+ *
+ * - Frame throttling, NV_fence:
+ *	- Drop marker IRQs into the command stream ahead of time.
+ *	- Wait on IRQs with the lock *not held*.
+ * - Check each for termination condition
+ *
+ * - Internally in cp_getbuffer, etc:
+ * - as above, but wait with lock held???
+ *
+ * NOTE: These functions are misleadingly named -- the IRQs aren't
+ * tied to DMA at all; this is just a hangover from DRI prehistory.
+ */
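+
+/*
+ * Sketch of the user-visible pattern (details elided): a client emits
+ * a marker via the DRM_RADEON_IRQ_EMIT ioctl, which returns a
+ * sequence number through drm_radeon_irq_emit_t.irq_seq, and later
+ * blocks in DRM_RADEON_IRQ_WAIT until RADEON_LAST_SWI_REG reaches
+ * that sequence number.
+ */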
+
+irqreturn_t
+radeon_driver_irq_handler(DRM_IRQ_ARGS)
+{
+ drm_device_t *dev = (drm_device_t *)(uintptr_t)arg;
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+ u32 stat;
+
+ /*
+ * Only consider the bits we're interested in - others could be used
+ * outside the DRM
+ */
+ stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
+ RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT));
+ if (!stat)
+ return (IRQ_NONE);
+
+ stat &= dev_priv->irq_enable_reg;
+
+ /* SW interrupt */
+ if (stat & RADEON_SW_INT_TEST) {
+ DRM_WAKEUP(&dev_priv->swi_queue);
+ }
+
+ /* VBLANK interrupt */
+ if (stat & (RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT)) {
+ int vblank_crtc = dev_priv->vblank_crtc;
+
+ if ((vblank_crtc &
+ (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
+ (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+ if (stat & RADEON_CRTC_VBLANK_STAT)
+ atomic_inc(&dev->vbl_received);
+ if (stat & RADEON_CRTC2_VBLANK_STAT)
+ atomic_inc(&dev->vbl_received2);
+ } else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
+ (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
+ ((stat & RADEON_CRTC2_VBLANK_STAT) &&
+ (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
+ atomic_inc(&dev->vbl_received);
+
+ DRM_WAKEUP(&dev->vbl_queue);
+ drm_vbl_send_signals(dev);
+ }
+
+ return (IRQ_HANDLED);
+}
+
+static int radeon_emit_irq(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ unsigned int ret;
+ RING_LOCALS;
+
+ atomic_inc(&dev_priv->swi_emitted);
+ ret = atomic_read(&dev_priv->swi_emitted);
+
+ BEGIN_RING(4);
+ OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+ OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ return (ret);
+}
+
+static int radeon_wait_irq(drm_device_t *dev, int swi_nr)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+ int ret = 0;
+
+ if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
+ return (0);
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ DRM_WAIT_ON(ret, &dev_priv->swi_queue, 3 * DRM_HZ,
+ RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
+
+ return (ret);
+}
+
+static int radeon_driver_vblank_do_wait(struct drm_device *dev,
+ unsigned int *sequence, int crtc)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+ unsigned int cur_vblank;
+ int ret = 0;
+ atomic_t *counter;
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ /*
+	 * It is unclear why the open source code resets the interrupt
+	 * status register here; doing so could drop pending interrupts.
+	 * That reset has been removed, and the code changed as follows:
+ */
+
+ if (crtc == DRM_RADEON_VBLANK_CRTC1) {
+ counter = &dev->vbl_received;
+ } else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
+ counter = &dev->vbl_received2;
+ } else
+ return (EINVAL);
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ /*
+ * Assume that the user has missed the current sequence number
+	 * by about a day rather than wanting to wait for years
+ * using vertical blanks...
+ */
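+	/*
+	 * The unsigned subtraction below is wrap-safe: a counter no
+	 * more than 2^23 ticks past *sequence satisfies the condition
+	 * even if it wrapped through zero on the way.
+	 */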
+ DRM_WAIT_ON(ret, &dev->vbl_queue, 3 * DRM_HZ,
+ (((cur_vblank = atomic_read(counter)) - *sequence) <= (1 << 23)));
+
+ *sequence = cur_vblank;
+
+ return (ret);
+}
+
+int
+radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+{
+ return (radeon_driver_vblank_do_wait(dev, sequence,
+ DRM_RADEON_VBLANK_CRTC1));
+}
+
+int
+radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+{
+ return (radeon_driver_vblank_do_wait(dev, sequence,
+ DRM_RADEON_VBLANK_CRTC2));
+}
+
+/*
+ * Needs the lock as it touches the ring.
+ */
+/*ARGSUSED*/
+int
+radeon_irq_emit(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_irq_emit_t emit;
+ int result;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_irq_emit_32_t emit32;
+
+ DRM_COPYFROM_WITH_RETURN(&emit32, (void *) data,
+ sizeof (emit32));
+ emit.irq_seq = (void *)(uintptr_t)(emit32.irq_seq);
+ } else {
+#endif
+
+ DRM_COPYFROM_WITH_RETURN(&emit, (void *) data, sizeof (emit));
+#ifdef _MULTI_DATAMODEL
+	}
+#endif
+
+ result = radeon_emit_irq(dev);
+
+ if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof (int))) {
+ DRM_ERROR("copy_to_user\n");
+ return (EFAULT);
+ }
+
+ return (0);
+}
+
+/*
+ * Doesn't need the hardware lock.
+ */
+/*ARGSUSED*/
+int
+radeon_irq_wait(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_irq_wait_t irqwait;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&irqwait, (void *) data, sizeof (irqwait));
+
+ return (radeon_wait_irq(dev, irqwait.irq_seq));
+}
+
+static void radeon_enable_interrupt(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv;
+
+ dev_priv = (drm_radeon_private_t *)dev->dev_private;
+ dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
+
+ if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1) {
+ dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
+ }
+
+ if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2) {
+ dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
+ }
+
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+ dev_priv->irq_enabled = 1;
+}
+
+
+/*
+ * drm_dma.h hooks
+ */
+int
+radeon_driver_irq_preinstall(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+
+ if (!dev_priv->mmio)
+ return (EINVAL);
+
+ /* Disable *all* interrupts */
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+ /* Clear bits if they're already high */
+ (void) radeon_acknowledge_irqs(dev_priv,
+ (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT |
+ RADEON_CRTC2_VBLANK_STAT));
+
+ return (0);
+}
+
+void
+radeon_driver_irq_postinstall(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+
+ atomic_set(&dev_priv->swi_emitted, 0);
+ DRM_INIT_WAITQUEUE(&dev_priv->swi_queue, DRM_INTR_PRI(dev));
+
+ radeon_enable_interrupt(dev);
+}
+
+void
+radeon_driver_irq_uninstall(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+ if (!dev_priv)
+ return;
+
+ /* Disable *all* interrupts */
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+ DRM_FINI_WAITQUEUE(&dev_priv->swi_queue);
+}
+
+int
+radeon_vblank_crtc_get(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv;
+ u32 flag;
+ u32 value;
+
+ dev_priv = (drm_radeon_private_t *)dev->dev_private;
+ flag = RADEON_READ(RADEON_GEN_INT_CNTL);
+ value = 0;
+
+ if (flag & RADEON_CRTC_VBLANK_MASK)
+ value |= DRM_RADEON_VBLANK_CRTC1;
+
+ if (flag & RADEON_CRTC2_VBLANK_MASK)
+ value |= DRM_RADEON_VBLANK_CRTC2;
+ return (value);
+}
+
+int
+radeon_vblank_crtc_set(drm_device_t *dev, int64_t value)
+{
+ drm_radeon_private_t *dev_priv;
+
+ dev_priv = (drm_radeon_private_t *)dev->dev_private;
+ if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+ DRM_ERROR("called with invalid crtc 0x%x\n",
+ (unsigned int)value);
+ return (EINVAL);
+ }
+ dev_priv->vblank_crtc = (unsigned int)value;
+ radeon_enable_interrupt(dev);
+ return (0);
+}
diff --git a/usr/src/uts/intel/io/drm/radeon_mem.c b/usr/src/uts/intel/io/drm/radeon_mem.c
new file mode 100644
index 0000000..b2ddba5
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/radeon_mem.c
@@ -0,0 +1,353 @@
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "radeon_io32.h"
+
+/*
+ * Very simple allocator for GART memory, working on a static range
+ * already mapped into each client's address space.
+ */
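+
+/*
+ * The heap is a circular, doubly-linked list of blocks anchored by a
+ * sentinel whose filp is -1.  A block whose filp is NULL is free;
+ * allocation splits a free block, and freeing coalesces the block
+ * with its free neighbours.
+ */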
+
+static struct mem_block *
+split_block(struct mem_block *p, int start, int size, drm_file_t *filp)
+{
+ /* Maybe cut off the start of an existing block */
+ if (start > p->start) {
+ struct mem_block *newblock =
+ drm_alloc(sizeof (*newblock), DRM_MEM_BUFS);
+ if (!newblock)
+ goto out;
+ newblock->start = start;
+ newblock->size = p->size - (start - p->start);
+ newblock->filp = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* Maybe cut off the end of an existing block */
+ if (size < p->size) {
+ struct mem_block *newblock =
+ drm_alloc(sizeof (*newblock), DRM_MEM_BUFS);
+ if (!newblock)
+ goto out;
+ newblock->start = start + size;
+ newblock->size = p->size - size;
+ newblock->filp = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size = size;
+ }
+
+out:
+ /* Our block is in the middle */
+ p->filp = filp;
+ return (p);
+}
+
+static struct mem_block *
+alloc_block(struct mem_block *heap, int size, int align2, drm_file_t *filp)
+{
+ struct mem_block *p;
+ int mask = (1 << align2) - 1;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ int start = (p->start + mask) & ~mask;
+ if (p->filp == 0 && start + size <= p->start + p->size)
+ return (split_block(p, start, size, filp));
+ }
+
+ return (NULL);
+}
+
+static struct mem_block *
+find_block(struct mem_block *heap, int start)
+{
+ struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next)
+ if (p->start == start)
+ return (p);
+
+ return (NULL);
+}
+
+static void
+free_block(struct mem_block *p)
+{
+ p->filp = NULL;
+
+ /*
+ * Assumes a single contiguous range. Needs a special filp in
+ * 'heap' to stop it being subsumed.
+ */
+ if (p->next->filp == 0) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ drm_free(q, sizeof (*q), DRM_MEM_BUFS);
+ }
+
+ if (p->prev->filp == 0) {
+ struct mem_block *q = p->prev;
+ q->size += p->size;
+ q->next = p->next;
+ q->next->prev = q;
+ drm_free(p, sizeof (*q), DRM_MEM_BUFS);
+ }
+}
+
+/*
+ * Initialize. How to check for an uninitialized heap?
+ */
+static int
+init_heap(struct mem_block **heap, int start, int size)
+{
+ struct mem_block *blocks = drm_alloc(sizeof (*blocks), DRM_MEM_BUFS);
+
+ if (!blocks)
+ return (ENOMEM);
+
+ *heap = drm_alloc(sizeof (**heap), DRM_MEM_BUFS);
+ if (!*heap) {
+ drm_free(blocks, sizeof (*blocks), DRM_MEM_BUFS);
+ return (ENOMEM);
+ }
+
+ blocks->start = start;
+ blocks->size = size;
+ blocks->filp = NULL;
+ blocks->next = blocks->prev = *heap;
+
+ (void) memset(*heap, 0, sizeof (**heap));
+ (*heap)->filp = (drm_file_t *)-1;
+ (*heap)->next = (*heap)->prev = blocks;
+ return (0);
+}
+
+/*
+ * Free all blocks associated with the releasing file.
+ */
+void
+radeon_mem_release(drm_file_t *filp, struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap || !heap->next)
+ return;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ if (p->filp == filp)
+ p->filp = NULL;
+ }
+
+ /*
+ * Assumes a single contiguous range. Needs a special filp in
+ * 'heap' to stop it being subsumed.
+ */
+ for (p = heap->next; p != heap; p = p->next) {
+ while (p->filp == 0 && p->next->filp == 0) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ drm_free(q, sizeof (*q), DRM_MEM_DRIVER);
+ }
+ }
+}
+
+/*
+ * Shutdown.
+ */
+void
+radeon_mem_takedown(struct mem_block **heap)
+{
+ struct mem_block *p;
+
+ if (!*heap)
+ return;
+
+ for (p = (*heap)->next; p != *heap; ) {
+ struct mem_block *q = p;
+ p = p->next;
+ drm_free(q, sizeof (*q), DRM_MEM_DRIVER);
+ }
+
+ drm_free(*heap, sizeof (**heap), DRM_MEM_DRIVER);
+ *heap = NULL;
+}
+
+/* IOCTL HANDLERS */
+
+static struct mem_block **
+get_heap(drm_radeon_private_t *dev_priv, int region)
+{
+ switch (region) {
+ case RADEON_MEM_REGION_GART:
+ return (&dev_priv->gart_heap);
+ case RADEON_MEM_REGION_FB:
+ return (&dev_priv->fb_heap);
+ default:
+ return (NULL);
+ }
+}
+
+/*ARGSUSED*/
+int
+radeon_mem_alloc(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_alloc_t alloc;
+ struct mem_block *block, **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
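+	/*
+	 * With _MULTI_DATAMODEL, a 32-bit client passes a struct laid
+	 * out with 32-bit pointers, so convert the ILP32 request into
+	 * the native form before using it.
+	 */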
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_mem_alloc_32_t alloc32;
+
+ DRM_COPYFROM_WITH_RETURN(&alloc32, (void *) data,
+ sizeof (alloc32));
+ alloc.region = alloc32.region;
+ alloc.alignment = alloc32.alignment;
+ alloc.size = alloc32.size;
+ alloc.region_offset = (void *)(uintptr_t)alloc32.region_offset;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&alloc, (void *) data, sizeof (alloc));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+
+ heap = get_heap(dev_priv, alloc.region);
+ if (!heap || !*heap)
+ return (EFAULT);
+
+ /*
+	 * Make things easier on ourselves: all allocations are at
+	 * least 4k aligned (alignment is the log2 of the byte
+	 * alignment, so 12 means 4 KB).
+ */
+ if (alloc.alignment < 12)
+ alloc.alignment = 12;
+
+ block = alloc_block(*heap, alloc.size, alloc.alignment, fpriv);
+
+ if (!block)
+ return (ENOMEM);
+
+ if (DRM_COPY_TO_USER(alloc.region_offset, &block->start,
+ sizeof (int))) {
+ DRM_ERROR("copy_to_user\n");
+ return (EFAULT);
+ }
+
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+radeon_mem_free(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_free_t memfree;
+ struct mem_block *block, **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&memfree, (void *) data, sizeof (memfree));
+
+ heap = get_heap(dev_priv, memfree.region);
+ if (!heap || !*heap)
+ return (EFAULT);
+
+ block = find_block(*heap, memfree.region_offset);
+ if (!block)
+ return (EFAULT);
+
+ if (block->filp != fpriv)
+ return (EPERM);
+
+ free_block(block);
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+radeon_mem_init_heap(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_init_heap_t initheap;
+ struct mem_block **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&initheap, (void *) data, sizeof (initheap));
+
+ heap = get_heap(dev_priv, initheap.region);
+ if (!heap)
+ return (EFAULT);
+
+ if (*heap) {
+ DRM_ERROR("heap already initialized?");
+ return (EFAULT);
+ }
+
+ return (init_heap(heap, initheap.start, initheap.size));
+}
diff --git a/usr/src/uts/intel/io/drm/radeon_state.c b/usr/src/uts/intel/io/drm/radeon_state.c
new file mode 100644
index 0000000..9323215
--- /dev/null
+++ b/usr/src/uts/intel/io/drm/radeon_state.c
@@ -0,0 +1,3530 @@
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "radeon_io32.h"
+
+/*
+ * Helper functions for client state checking and fixup
+ */
+
+static inline int
+radeon_check_and_fixup_offset(drm_radeon_private_t *dev_priv,
+ drm_file_t *filp_priv, u32 *offset)
+{
+ u64 off = *offset;
+ u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
+ struct drm_radeon_driver_file_fields *radeon_priv;
+
+ /*
+ * Hrm ... the story of the offset ... So this function converts
+ * the various ideas of what userland clients might have for an
+ * offset in the card address space into an offset into the card
+ * address space :) So with a sane client, it should just keep
+ * the value intact and just do some boundary checking. However,
+ * not all clients are sane. Some older clients pass us 0 based
+ * offsets relative to the start of the framebuffer and some may
+ * assume the AGP aperture it appended to the framebuffer, so we
+	 * assume the AGP aperture is appended to the framebuffer, so we
+ *
+ * Note: It might be a good idea here to make sure the offset lands
+ * in some "allowed" area to protect things like the PCIE GART...
+ */
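+
+	/*
+	 * Worked example (made-up numbers): with fb_location =
+	 * 0xe0000000 and a 64 MB framebuffer, a legacy client passing
+	 * the 0-based offset 0x100000 fails the first check below, is
+	 * shifted by radeon_fb_delta (here 0xe0000000) to 0xe0100000,
+	 * and then passes the RADEON_CHECK_OFFSET() recheck.
+	 */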
+
+ /*
+ * First, the best case, the offset already lands in either the
+ * framebuffer or the GART mapped space
+ */
+
+ if (RADEON_CHECK_OFFSET(dev_priv, off))
+ return (0);
+
+ /*
+ * Ok, that didn't happen... now check if we have a zero based
+ * offset that fits in the framebuffer + gart space, apply the
+ * magic offset we get from SETPARAM or calculated from fb_location
+ */
+ if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
+ radeon_priv = filp_priv->driver_priv;
+ off += radeon_priv->radeon_fb_delta;
+ }
+
+ /* Finally, assume we aimed at a GART offset if beyond the fb */
+ if (off > fb_end)
+ off = off - fb_end - 1 + dev_priv->gart_vm_start;
+
+ /* Now recheck and fail if out of bounds */
+ if (RADEON_CHECK_OFFSET(dev_priv, off)) {
+ DRM_DEBUG("offset fixed up to 0x%x\n", off);
+ *offset = (uint32_t)off;
+ return (0);
+ }
+ return (EINVAL);
+}
+
+static inline int
+radeon_check_and_fixup_packets(drm_radeon_private_t *dev_priv,
+ drm_file_t *filp_priv, int id, u32 *data)
+{
+ switch (id) {
+
+ case RADEON_EMIT_PP_MISC:
+ if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
+ DRM_ERROR("Invalid depth buffer offset\n");
+ return (EINVAL);
+ }
+ break;
+
+ case RADEON_EMIT_PP_CNTL:
+ if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
+ DRM_ERROR("Invalid colour buffer offset\n");
+ return (EINVAL);
+ }
+ break;
+
+ case R200_EMIT_PP_TXOFFSET_0:
+ case R200_EMIT_PP_TXOFFSET_1:
+ case R200_EMIT_PP_TXOFFSET_2:
+ case R200_EMIT_PP_TXOFFSET_3:
+ case R200_EMIT_PP_TXOFFSET_4:
+ case R200_EMIT_PP_TXOFFSET_5:
+ if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ &data[0])) {
+ DRM_ERROR("Invalid R200 texture offset\n");
+ return (EINVAL);
+ }
+ break;
+
+ case RADEON_EMIT_PP_TXFILTER_0:
+ case RADEON_EMIT_PP_TXFILTER_1:
+ case RADEON_EMIT_PP_TXFILTER_2:
+ if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
+ DRM_ERROR("Invalid R100 texture offset\n");
+ return (EINVAL);
+ }
+ break;
+
+ case R200_EMIT_PP_CUBIC_OFFSETS_0:
+ case R200_EMIT_PP_CUBIC_OFFSETS_1:
+ case R200_EMIT_PP_CUBIC_OFFSETS_2:
+ case R200_EMIT_PP_CUBIC_OFFSETS_3:
+ case R200_EMIT_PP_CUBIC_OFFSETS_4:
+ case R200_EMIT_PP_CUBIC_OFFSETS_5: {
+ int i;
+ for (i = 0; i < 5; i++) {
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &data[i])) {
+ DRM_ERROR("Invalid R200 cubic"
+ " texture offset\n");
+ return (EINVAL);
+ }
+ }
+ break;
+ }
+
+ case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
+ case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
+ case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:
+ {
+ int i;
+ for (i = 0; i < 5; i++) {
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &data[i])) {
+ DRM_ERROR("Invalid R100 cubic"
+ " texture offset\n");
+ return (EINVAL);
+ }
+ }
+ }
+ break;
+
+ case R200_EMIT_VAP_CTL:
+ {
+ RING_LOCALS;
+ BEGIN_RING(2);
+ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+ ADVANCE_RING();
+ }
+ break;
+
+ case RADEON_EMIT_RB3D_COLORPITCH:
+ case RADEON_EMIT_RE_LINE_PATTERN:
+ case RADEON_EMIT_SE_LINE_WIDTH:
+ case RADEON_EMIT_PP_LUM_MATRIX:
+ case RADEON_EMIT_PP_ROT_MATRIX_0:
+ case RADEON_EMIT_RB3D_STENCILREFMASK:
+ case RADEON_EMIT_SE_VPORT_XSCALE:
+ case RADEON_EMIT_SE_CNTL:
+ case RADEON_EMIT_SE_CNTL_STATUS:
+ case RADEON_EMIT_RE_MISC:
+ case RADEON_EMIT_PP_BORDER_COLOR_0:
+ case RADEON_EMIT_PP_BORDER_COLOR_1:
+ case RADEON_EMIT_PP_BORDER_COLOR_2:
+ case RADEON_EMIT_SE_ZBIAS_FACTOR:
+ case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
+ case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
+ case R200_EMIT_PP_TXCBLEND_0:
+ case R200_EMIT_PP_TXCBLEND_1:
+ case R200_EMIT_PP_TXCBLEND_2:
+ case R200_EMIT_PP_TXCBLEND_3:
+ case R200_EMIT_PP_TXCBLEND_4:
+ case R200_EMIT_PP_TXCBLEND_5:
+ case R200_EMIT_PP_TXCBLEND_6:
+ case R200_EMIT_PP_TXCBLEND_7:
+ case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
+ case R200_EMIT_TFACTOR_0:
+ case R200_EMIT_VTX_FMT_0:
+ case R200_EMIT_MATRIX_SELECT_0:
+ case R200_EMIT_TEX_PROC_CTL_2:
+ case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
+ case R200_EMIT_PP_TXFILTER_0:
+ case R200_EMIT_PP_TXFILTER_1:
+ case R200_EMIT_PP_TXFILTER_2:
+ case R200_EMIT_PP_TXFILTER_3:
+ case R200_EMIT_PP_TXFILTER_4:
+ case R200_EMIT_PP_TXFILTER_5:
+ case R200_EMIT_VTE_CNTL:
+ case R200_EMIT_OUTPUT_VTX_COMP_SEL:
+ case R200_EMIT_PP_TAM_DEBUG3:
+ case R200_EMIT_PP_CNTL_X:
+ case R200_EMIT_RB3D_DEPTHXY_OFFSET:
+ case R200_EMIT_RE_AUX_SCISSOR_CNTL:
+ case R200_EMIT_RE_SCISSOR_TL_0:
+ case R200_EMIT_RE_SCISSOR_TL_1:
+ case R200_EMIT_RE_SCISSOR_TL_2:
+ case R200_EMIT_SE_VAP_CNTL_STATUS:
+ case R200_EMIT_SE_VTX_STATE_CNTL:
+ case R200_EMIT_RE_POINTSIZE:
+ case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
+ case R200_EMIT_PP_CUBIC_FACES_0:
+ case R200_EMIT_PP_CUBIC_FACES_1:
+ case R200_EMIT_PP_CUBIC_FACES_2:
+ case R200_EMIT_PP_CUBIC_FACES_3:
+ case R200_EMIT_PP_CUBIC_FACES_4:
+ case R200_EMIT_PP_CUBIC_FACES_5:
+ case RADEON_EMIT_PP_TEX_SIZE_0:
+ case RADEON_EMIT_PP_TEX_SIZE_1:
+ case RADEON_EMIT_PP_TEX_SIZE_2:
+ case R200_EMIT_RB3D_BLENDCOLOR:
+ case R200_EMIT_TCL_POINT_SPRITE_CNTL:
+ case RADEON_EMIT_PP_CUBIC_FACES_0:
+ case RADEON_EMIT_PP_CUBIC_FACES_1:
+ case RADEON_EMIT_PP_CUBIC_FACES_2:
+ case R200_EMIT_PP_TRI_PERF_CNTL:
+ case R200_EMIT_PP_AFS_0:
+ case R200_EMIT_PP_AFS_1:
+ case R200_EMIT_ATF_TFACTOR:
+ case R200_EMIT_PP_TXCTLALL_0:
+ case R200_EMIT_PP_TXCTLALL_1:
+ case R200_EMIT_PP_TXCTLALL_2:
+ case R200_EMIT_PP_TXCTLALL_3:
+ case R200_EMIT_PP_TXCTLALL_4:
+ case R200_EMIT_PP_TXCTLALL_5:
+ case R200_EMIT_VAP_PVS_CNTL:
+ /* These packets don't contain memory offsets */
+ break;
+
+ default:
+ DRM_ERROR("Unknown state packet ID %d\n", id);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static inline int
+radeon_check_and_fixup_packet3(drm_radeon_private_t *dev_priv,
+ drm_file_t *filp_priv, drm_radeon_kcmd_buffer_t *cmdbuf,
+ unsigned int *cmdsz)
+{
+ u32 *cmd = (u32 *)(uintptr_t)cmdbuf->buf;
+ u32 offset, narrays;
+ int count, i, k;
+
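+	/*
+	 * A type-3 packet header keeps its dword count in bits 29:16;
+	 * the full packet, header included, is that count plus two
+	 * dwords.
+	 */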
+ *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+
+ if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
+ DRM_ERROR("Not a type 3 packet\n");
+ return (EINVAL);
+ }
+
+ if (4 * *cmdsz > cmdbuf->bufsz) {
+ DRM_ERROR("Packet size larger than size of data provided\n");
+ return (EINVAL);
+ }
+
+
+ switch (cmd[0] & 0xff00) {
+ /* XXX Are there old drivers needing other packets? */
+
+ case RADEON_3D_DRAW_IMMD:
+ case RADEON_3D_DRAW_VBUF:
+ case RADEON_3D_DRAW_INDX:
+ case RADEON_WAIT_FOR_IDLE:
+ case RADEON_CP_NOP:
+ case RADEON_3D_CLEAR_ZMASK:
+#if 0
+ case RADEON_CP_NEXT_CHAR:
+ case RADEON_CP_PLY_NEXTSCAN:
+ case RADEON_CP_SET_SCISSORS:
+ /* probably safe but will never need them? */
+#endif
+/* these packets are safe */
+ break;
+
+ case RADEON_CP_3D_DRAW_IMMD_2:
+ case RADEON_CP_3D_DRAW_VBUF_2:
+ case RADEON_CP_3D_DRAW_INDX_2:
+ case RADEON_3D_CLEAR_HIZ:
+ /* safe but r200 only */
+ if (dev_priv->microcode_version != UCODE_R200) {
+ DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+ return (EINVAL);
+ }
+ break;
+
+ case RADEON_3D_LOAD_VBPNTR:
+ count = (cmd[0] >> 16) & 0x3fff;
+
+ if (count > 18) { /* 12 arrays max */
+ DRM_ERROR(
+ "Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+ count);
+ return (EINVAL);
+ }
+
+ /* carefully check packet contents */
+ narrays = cmd[1] & ~0xc000;
+ k = 0;
+ i = 2;
+ while ((k < narrays) && (i < (count + 2))) {
+ i++; /* skip attribute field */
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &cmd[i])) {
+ DRM_ERROR(
+				    "Invalid offset (k=%d i=%d) in"
+ " 3D_LOAD_VBPNTR packet.\n", k, i);
+ return (EINVAL);
+ }
+ k++;
+ i++;
+ if (k == narrays)
+ break;
+ /* have one more to process, they come in pairs */
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &cmd[i])) {
+ DRM_ERROR(
+ "Invalid offset (k=%d i=%d) in"
+ " 3D_LOAD_VBPNTR packet.\n", k, i);
+ return (EINVAL);
+ }
+ k++;
+ i++;
+ }
+ /* do the counts match what we expect ? */
+ if ((k != narrays) || (i != (count + 2))) {
+ DRM_ERROR(
+			    "Malformed 3D_LOAD_VBPNTR packet"
+			    " (k=%d i=%d narrays=%d count+1=%d).\n",
+ k, i, narrays, count + 1);
+ return (EINVAL);
+ }
+ break;
+
+ case RADEON_3D_RNDR_GEN_INDX_PRIM:
+ if (dev_priv->microcode_version != UCODE_R100) {
+ DRM_ERROR("Invalid 3d packet for r200-class chip\n");
+ return (EINVAL);
+ }
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &cmd[1])) {
+ DRM_ERROR("Invalid rndr_gen_indx offset\n");
+ return (EINVAL);
+ }
+ break;
+
+ case RADEON_CP_INDX_BUFFER:
+ if (dev_priv->microcode_version != UCODE_R200) {
+ DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+ return (EINVAL);
+ }
+ if ((cmd[1] & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR(
+ "Invalid indx_buffer reg address %08X\n", cmd[1]);
+ return (EINVAL);
+ }
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &cmd[2])) {
+ DRM_ERROR(
+ "Invalid indx_buffer offset is %08X\n", cmd[2]);
+ return (EINVAL);
+ }
+ break;
+
+ case RADEON_CNTL_HOSTDATA_BLT:
+ case RADEON_CNTL_PAINT_MULTI:
+ case RADEON_CNTL_BITBLT_MULTI:
+ /* MSB of opcode: next DWORD GUI_CNTL */
+ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ offset = cmd[2] << 10;
+ if (radeon_check_and_fixup_offset
+ (dev_priv, filp_priv, &offset)) {
+ DRM_ERROR("Invalid first packet offset\n");
+ return (EINVAL);
+ }
+ cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
+ }
+
+ if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ offset = cmd[3] << 10;
+ if (radeon_check_and_fixup_offset
+ (dev_priv, filp_priv, &offset)) {
+ DRM_ERROR("Invalid second packet offset\n");
+ return (EINVAL);
+ }
+ cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
+ }
+ break;
+
+ default:
+ DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * CP hardware state programming functions
+ */
+
+static inline void radeon_emit_clip_rect(drm_radeon_private_t *dev_priv,
+ drm_clip_rect_t *box)
+{
+ RING_LOCALS;
+
+ DRM_DEBUG(" box: x1=%d y1=%d x2=%d y2=%d\n",
+ box->x1, box->y1, box->x2, box->y2);
+
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
+ OUT_RING((box->y1 << 16) | box->x1);
+ OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
+ OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
+ ADVANCE_RING();
+}
+
+/* Emit 1.1 state */
+static int radeon_emit_state(drm_radeon_private_t *dev_priv,
+ drm_file_t *filp_priv, drm_radeon_context_regs_t *ctx,
+ drm_radeon_texture_regs_t *tex, unsigned int dirty)
+{
+ RING_LOCALS;
+ DRM_DEBUG("dirty=0x%08x\n", dirty);
+
+ if (dirty & RADEON_UPLOAD_CONTEXT) {
+ if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ &ctx->rb3d_depthoffset)) {
+ DRM_ERROR("Invalid depth buffer offset\n");
+ return (EINVAL);
+ }
+
+ if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ &ctx->rb3d_coloroffset)) {
+ DRM_ERROR("Invalid depth buffer offset\n");
+ return (EINVAL);
+ }
+
+ BEGIN_RING(14);
+ OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
+ OUT_RING(ctx->pp_misc);
+ OUT_RING(ctx->pp_fog_color);
+ OUT_RING(ctx->re_solid_color);
+ OUT_RING(ctx->rb3d_blendcntl);
+ OUT_RING(ctx->rb3d_depthoffset);
+ OUT_RING(ctx->rb3d_depthpitch);
+ OUT_RING(ctx->rb3d_zstencilcntl);
+ OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
+ OUT_RING(ctx->pp_cntl);
+ OUT_RING(ctx->rb3d_cntl);
+ OUT_RING(ctx->rb3d_coloroffset);
+ OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
+ OUT_RING(ctx->rb3d_colorpitch);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_VERTFMT) {
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
+ OUT_RING(ctx->se_coord_fmt);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_LINE) {
+ BEGIN_RING(5);
+ OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
+ OUT_RING(ctx->re_line_pattern);
+ OUT_RING(ctx->re_line_state);
+ OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
+ OUT_RING(ctx->se_line_width);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_BUMPMAP) {
+ BEGIN_RING(5);
+ OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
+ OUT_RING(ctx->pp_lum_matrix);
+ OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
+ OUT_RING(ctx->pp_rot_matrix_0);
+ OUT_RING(ctx->pp_rot_matrix_1);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_MASKS) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
+ OUT_RING(ctx->rb3d_stencilrefmask);
+ OUT_RING(ctx->rb3d_ropcntl);
+ OUT_RING(ctx->rb3d_planemask);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_VIEWPORT) {
+ BEGIN_RING(7);
+ OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
+ OUT_RING(ctx->se_vport_xscale);
+ OUT_RING(ctx->se_vport_xoffset);
+ OUT_RING(ctx->se_vport_yscale);
+ OUT_RING(ctx->se_vport_yoffset);
+ OUT_RING(ctx->se_vport_zscale);
+ OUT_RING(ctx->se_vport_zoffset);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_SETUP) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
+ OUT_RING(ctx->se_cntl);
+ OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
+ OUT_RING(ctx->se_cntl_status);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_MISC) {
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
+ OUT_RING(ctx->re_misc);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_TEX0) {
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &tex[0].pp_txoffset)) {
+ DRM_ERROR("Invalid texture offset for unit 0\n");
+ return (EINVAL);
+ }
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
+ OUT_RING(tex[0].pp_txfilter);
+ OUT_RING(tex[0].pp_txformat);
+ OUT_RING(tex[0].pp_txoffset);
+ OUT_RING(tex[0].pp_txcblend);
+ OUT_RING(tex[0].pp_txablend);
+ OUT_RING(tex[0].pp_tfactor);
+ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
+ OUT_RING(tex[0].pp_border_color);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_TEX1) {
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &tex[1].pp_txoffset)) {
+ DRM_ERROR("Invalid texture offset for unit 1\n");
+ return (EINVAL);
+ }
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
+ OUT_RING(tex[1].pp_txfilter);
+ OUT_RING(tex[1].pp_txformat);
+ OUT_RING(tex[1].pp_txoffset);
+ OUT_RING(tex[1].pp_txcblend);
+ OUT_RING(tex[1].pp_txablend);
+ OUT_RING(tex[1].pp_tfactor);
+ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
+ OUT_RING(tex[1].pp_border_color);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_TEX2) {
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv, &tex[2].pp_txoffset)) {
+ DRM_ERROR("Invalid texture offset for unit 2\n");
+ return (EINVAL);
+ }
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
+ OUT_RING(tex[2].pp_txfilter);
+ OUT_RING(tex[2].pp_txformat);
+ OUT_RING(tex[2].pp_txoffset);
+ OUT_RING(tex[2].pp_txcblend);
+ OUT_RING(tex[2].pp_txablend);
+ OUT_RING(tex[2].pp_tfactor);
+ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
+ OUT_RING(tex[2].pp_border_color);
+ ADVANCE_RING();
+ }
+
+ return (0);
+}
+
+/* Emit 1.2 state */
+static int radeon_emit_state2(drm_radeon_private_t *dev_priv,
+ drm_file_t *filp_priv, drm_radeon_state_t *state)
+{
+ RING_LOCALS;
+
+ if (state->dirty & RADEON_UPLOAD_ZBIAS) {
+ BEGIN_RING(3);
+ OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
+ OUT_RING(state->context2.se_zbias_factor);
+ OUT_RING(state->context2.se_zbias_constant);
+ ADVANCE_RING();
+ }
+
+ return (radeon_emit_state(dev_priv, filp_priv,
+ &state->context, state->tex, state->dirty));
+}
+
+/*
+ * New (1.3) state mechanism. 3 commands (packet, scalar, vector) in
+ * 1.3 cmdbuffers allow all previous state to be updated as well as
+ * the tcl scalar and vector areas.
+ */
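+/*
+ * This table is indexed by the RADEON_EMIT_* / R200_EMIT_* state
+ * packet IDs validated in radeon_check_and_fixup_packets() above, so
+ * entry order must stay in sync with those IDs (note the index
+ * markers 61 and 85 below).
+ */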
+static struct {
+ int start;
+ int len;
+ const char *name;
+} packet[RADEON_MAX_STATE_PACKETS] = {
+ {RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
+ {RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
+ {RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
+ {RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
+ {RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
+ {RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
+ {RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
+ {RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
+ {RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
+ {RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
+ {RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
+ {RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
+ {RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
+ {RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
+ {RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
+ {RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
+ {RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
+ {RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
+ {RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
+ {RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
+ {RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
+ "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
+ {R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
+ {R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
+ {R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
+ {R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
+ {R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
+ {R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
+ {R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
+ {R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
+ {R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
+ {R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
+ {R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
+ {R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
+ {R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
+ {R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
+ {R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
+ {R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
+ {R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
+ {R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
+ {R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
+ {R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
+ {R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
+ {R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
+ {R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
+ {R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
+ {R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
+ {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
+ {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
+ {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
+ {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+ "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+ {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
+ {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
+ {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
+ {R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
+ {R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
+ {R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
+ {R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
+ {R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
+ {R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
+ {R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
+ {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
+ "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
+ {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
+ {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
+ {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
+ {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
+ {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
+ {R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
+ {R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
+ {R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
+ {R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
+ {R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
+ {R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
+ {R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
+ {RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
+ {RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
+ {RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
+ {R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
+ {R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
+ {RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
+ {RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
+ {RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
+ {RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
+ {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
+ {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
+ {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
+ {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
+ {R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
+ {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
+ {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
+ {R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
+ {R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
+ {R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
+ {R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
+ {R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
+ {R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
+};
+
+/*
+ * Performance monitoring functions
+ */
+
+static void radeon_clear_box(drm_radeon_private_t *dev_priv,
+ int x, int y, int w, int h, int r, int g, int b)
+{
+ u32 color;
+ RING_LOCALS;
+
+ x += dev_priv->sarea_priv->boxes[0].x1;
+ y += dev_priv->sarea_priv->boxes[0].y1;
+
+ switch (dev_priv->color_fmt) {
+ case RADEON_COLOR_FORMAT_RGB565:
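+		/* Pack 8:8:8 RGB down to 5:6:5. */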
+ color = (((r & 0xf8) << 8) |
+ ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
+ break;
+ case RADEON_COLOR_FORMAT_ARGB8888:
+ default:
+ color = (((0xfful) << 24) | (r << 16) | (g << 8) | b);
+ break;
+ }
+
+ BEGIN_RING(4);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+ OUT_RING(0xffffffff);
+ ADVANCE_RING();
+
+ BEGIN_RING(6);
+
+ OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
+ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+ if (dev_priv->page_flipping && dev_priv->current_page == 1) {
+ OUT_RING(dev_priv->front_pitch_offset);
+ } else {
+ OUT_RING(dev_priv->back_pitch_offset);
+ }
+
+ OUT_RING(color);
+
+ OUT_RING((x << 16) | y);
+ OUT_RING((w << 16) | h);
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv)
+{
+ /*
+ * Collapse various things into a wait flag -- trying to
+	 * guess if userspace slept -- better just to have them tell us.
+ */
+ if (dev_priv->stats.last_frame_reads > 1 ||
+ dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+ }
+
+ if (dev_priv->stats.freelist_loops) {
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+ }
+
+ /* Purple box for page flipping */
+ if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
+ radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);
+
+ /* Red box if we have to wait for idle at any point */
+ if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
+ radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);
+
+ /* Blue box: lost context? */
+
+ /* Yellow box for texture swaps */
+ if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
+ radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);
+
+ /* Green box if hardware never idles (as far as we can tell) */
+ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
+ radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+
+ /*
+ * Draw bars indicating number of buffers allocated
+ * (not a great measure, easily confused)
+ */
+ if (dev_priv->stats.requested_bufs) {
+ if (dev_priv->stats.requested_bufs > 100)
+ dev_priv->stats.requested_bufs = 100;
+
+ radeon_clear_box(dev_priv, 4, 16,
+ dev_priv->stats.requested_bufs, 4, 196, 128, 128);
+ }
+
+ (void) memset(&dev_priv->stats, 0, sizeof (dev_priv->stats));
+}
+
+/*
+ * CP command dispatch functions
+ */
+
+static void radeon_cp_dispatch_clear(drm_device_t *dev,
+ drm_radeon_clear_t *clear, drm_radeon_clear_rect_t *depth_boxes)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ unsigned int flags = clear->flags;
+ u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
+ int i;
+ RING_LOCALS;
+ DRM_DEBUG("flags = 0x%x\n", flags);
+
+ dev_priv->stats.clears++;
+
+ if (dev_priv->page_flipping && dev_priv->current_page == 1) {
+ unsigned int tmp = flags;
+
+ flags &= ~(RADEON_FRONT | RADEON_BACK);
+ if (tmp & RADEON_FRONT)
+ flags |= RADEON_BACK;
+ if (tmp & RADEON_BACK)
+ flags |= RADEON_FRONT;
+ }
+
+ if (flags & (RADEON_FRONT | RADEON_BACK)) {
+
+ BEGIN_RING(4);
+
+ /*
+ * Ensure the 3D stream is idle before doing a
+ * 2D fill to clear the front or back buffer.
+ */
+ RADEON_WAIT_UNTIL_3D_IDLE();
+
+ OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+ OUT_RING(clear->color_mask);
+
+ ADVANCE_RING();
+
+ /* Make sure we restore the 3D state next time. */
+ dev_priv->sarea_priv->ctx_owner = 0;
+
+ for (i = 0; i < nbox; i++) {
+ int x = pbox[i].x1;
+ int y = pbox[i].y1;
+ int w = pbox[i].x2 - x;
+ int h = pbox[i].y2 - y;
+
+ DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
+ x, y, w, h, flags);
+
+ if (flags & RADEON_FRONT) {
+ BEGIN_RING(6);
+
+ OUT_RING(CP_PACKET3
+ (RADEON_CNTL_PAINT_MULTI, 4));
+ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+				    (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P |
+ RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+ OUT_RING(dev_priv->front_pitch_offset);
+ OUT_RING(clear->clear_color);
+
+ OUT_RING((x << 16) | y);
+ OUT_RING((w << 16) | h);
+
+ ADVANCE_RING();
+ }
+
+ if (flags & RADEON_BACK) {
+ BEGIN_RING(6);
+
+ OUT_RING(CP_PACKET3
+ (RADEON_CNTL_PAINT_MULTI, 4));
+ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+				    (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P |
+ RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+ OUT_RING(dev_priv->back_pitch_offset);
+ OUT_RING(clear->clear_color);
+
+ OUT_RING((x << 16) | y);
+ OUT_RING((w << 16) | h);
+
+ ADVANCE_RING();
+ }
+ }
+ }
+
+ /* hyper z clear */
+ /*
+	 * no docs available, based on reverse engineering
+ * by Stephane Marchesin
+ */
+ if ((flags & (RADEON_DEPTH | RADEON_STENCIL)) &&
+ (flags & RADEON_CLEAR_FASTZ)) {
+
+ int i;
+		int depthpixperline =
+		    dev_priv->depth_fmt == RADEON_DEPTH_FORMAT_16BIT_INT_Z ?
+		    (dev_priv->depth_pitch / 2) :
+		    (dev_priv->depth_pitch / 4);
+
+ u32 clearmask;
+
+ u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
+ ((clear->depth_mask & 0xff) << 24);
+
+ /*
+ * Make sure we restore the 3D state next time.
+ * we haven't touched any "normal" state - still
+ * need this?
+ */
+ dev_priv->sarea_priv->ctx_owner = 0;
+
+ if ((dev_priv->flags & RADEON_HAS_HIERZ) &&
+ (flags & RADEON_USE_HIERZ)) {
+ /* FIXME : reverse engineer that for Rx00 cards */
+ /*
+ * FIXME : the mask supposedly contains low-res
+ * z values. So can't set just to the max (0xff?
+ * or actually 0x3fff?), need to take z clear
+ * value into account?
+ */
+ /*
+ * pattern seems to work for r100, though get
+ * slight rendering errors with glxgears. If
+ * hierz is not enabled for r100, only 4 bits
+ * which indicate clear (15,16,31,32, all zero)
+ * matter, the other ones are ignored, and the
+ * same clear mask can be used. That's very
+ * different behaviour than R200 which needs
+ * different clear mask and different number
+ * of tiles to clear if hierz is enabled or not !?!
+ */
+ clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
+ } else {
+			/*
+			 * Clear mask: chooses the clearing pattern.
+			 * rv250: could be used to clear only parts of
+			 * macrotiles (but that would get really
+			 * complicated...)?  Bits 0 and 1 (either or both
+			 * of them ?!?!) are used to not clear tile 0 (or
+			 * maybe one of the bits indicates whether the tile
+			 * is compressed), bits 2 and 3 to not clear tile 1,
+			 * and so on.  Pattern is as follows:
+			 * | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
+			 * bits -----------------------------------------------
+			 * | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
+			 * rv100: clearmask covers 2x8 4x1 tiles, but one
+			 * clear still covers 256 pixels ?!?
+			 */
+ clearmask = 0x0;
+ }
+
+ BEGIN_RING(8);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
+ tempRB3D_DEPTHCLEARVALUE);
+ /* what offset is this exactly ? */
+ OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
+ /* need ctlstat, otherwise get some strange black flickering */
+ OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
+ RADEON_RB3D_ZC_FLUSH_ALL);
+ ADVANCE_RING();
+
+ for (i = 0; i < nbox; i++) {
+ int tileoffset, nrtilesx, nrtilesy, j;
+ /*
+ * it looks like r200 needs rv-style clears, at
+ * least if hierz is not enabled?
+ */
+ if ((dev_priv->flags & RADEON_HAS_HIERZ) &&
+ !(dev_priv->microcode_version == UCODE_R200)) {
+ /*
+ * FIXME : figure this out for r200 (when hierz
+ * is enabled). Or maybe r200 actually doesn't
+ * need to put the low-res z value into the tile
+ * cache like r100, but just needs to clear the
+				 * hi-level z-buffer? Works for R100, both with
+				 * hierz and without. R100 seems to operate on
+				 * 2x1 8x8 tiles, but... odd: offset/nrtiles
+				 * need to be 64 pix (4 blocks) aligned?
+ * Potentially problematic with resolutions
+ * which are not 64 pix aligned?
+ */
+ tileoffset =
+ ((pbox[i].y1 >> 3) * depthpixperline +
+ pbox[i].x1) >> 6;
+ nrtilesx =
+ ((pbox[i].x2 & ~63) -
+ (pbox[i].x1 & ~63)) >> 4;
+ nrtilesy =
+ (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+ for (j = 0; j <= nrtilesy; j++) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3
+ (RADEON_3D_CLEAR_ZMASK, 2));
+ /* first tile */
+ OUT_RING(tileoffset * 8);
+ /* the number of tiles to clear */
+ OUT_RING(nrtilesx + 4);
+ /*
+ * clear mask :
+ * chooses the clearing pattern.
+ */
+ OUT_RING(clearmask);
+ ADVANCE_RING();
+ tileoffset += depthpixperline >> 6;
+ }
+ } else if (dev_priv->microcode_version == UCODE_R200) {
+ /* works for rv250. */
+ /*
+ * find first macro tile
+ * (8x2 4x4 z-pixels on rv250)
+ */
+ tileoffset =
+ ((pbox[i].y1 >> 3) * depthpixperline +
+ pbox[i].x1) >> 5;
+ nrtilesx =
+ (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
+ nrtilesy =
+ (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+ for (j = 0; j <= nrtilesy; j++) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3
+ (RADEON_3D_CLEAR_ZMASK, 2));
+ /* first tile */
+ /*
+ * judging by the first tile
+ * offset needed, could possibly
+ * directly address/clear 4x4
+ * tiles instead of 8x2 * 4x4
+ * macro tiles, though would
+ * still need clear mask for
+					 * right/bottom if truly 4x4
+ * granularity is desired ?
+ */
+ OUT_RING(tileoffset * 16);
+ /* the number of tiles to clear */
+ OUT_RING(nrtilesx + 1);
+ /*
+ * clear mask :
+ * chooses the clearing pattern.
+ */
+ OUT_RING(clearmask);
+ ADVANCE_RING();
+ tileoffset += depthpixperline >> 5;
+ }
+ } else { /* rv 100 */
+ /* rv100 might not need 64 pix alignment */
+ /* offsets are, hmm, weird */
+ tileoffset =
+ ((pbox[i].y1 >> 4) * depthpixperline +
+ pbox[i].x1) >> 6;
+ nrtilesx =
+ ((pbox[i].x2 & ~63) -
+ (pbox[i].x1 & ~63)) >> 4;
+ nrtilesy =
+ (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
+ for (j = 0; j <= nrtilesy; j++) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3
+ (RADEON_3D_CLEAR_ZMASK, 2));
+ OUT_RING(tileoffset * 128);
+ /* the number of tiles to clear */
+ OUT_RING(nrtilesx + 4);
+ /*
+ * clear mask :
+ * chooses the clearing pattern.
+ */
+ OUT_RING(clearmask);
+ ADVANCE_RING();
+ tileoffset += depthpixperline >> 6;
+ }
+ }
+ }
+
+ /* TODO don't always clear all hi-level z tiles */
+ if ((dev_priv->flags & RADEON_HAS_HIERZ) &&
+ (dev_priv->microcode_version == UCODE_R200) &&
+ (flags & RADEON_USE_HIERZ))
+ /*
+ * r100 and cards without hierarchical z-buffer
+ * have no high-level z-buffer
+ */
+ /*
+ * FIXME : the mask supposedly contains low-res
+ * z values. So can't set just to the max (0xff?
+ * or actually 0x3fff?), need to take z clear value
+ * into account?
+ */
+ {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
+ OUT_RING(0x0); /* First tile */
+ OUT_RING(0x3cc0);
+ OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
+ ADVANCE_RING();
+ }
+ }
+
+ /*
+ * We have to clear the depth and/or stencil buffers by
+ * rendering a quad into just those buffers. Thus, we have to
+ * make sure the 3D engine is configured correctly.
+ */
+ else if ((dev_priv->microcode_version == UCODE_R200) &&
+ (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+ int tempPP_CNTL;
+ int tempRE_CNTL;
+ int tempRB3D_CNTL;
+ int tempRB3D_ZSTENCILCNTL;
+ int tempRB3D_STENCILREFMASK;
+ int tempRB3D_PLANEMASK;
+ int tempSE_CNTL;
+ int tempSE_VTE_CNTL;
+ int tempSE_VTX_FMT_0;
+ int tempSE_VTX_FMT_1;
+ int tempSE_VAP_CNTL;
+ int tempRE_AUX_SCISSOR_CNTL;
+
+ tempPP_CNTL = 0;
+ tempRE_CNTL = 0;
+
+ tempRB3D_CNTL = depth_clear->rb3d_cntl;
+
+ tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+ tempRB3D_STENCILREFMASK = 0x0;
+
+ tempSE_CNTL = depth_clear->se_cntl;
+
+ /* Disable TCL */
+
+ tempSE_VAP_CNTL =
+ (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
+ (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
+
+ tempRB3D_PLANEMASK = 0x0;
+
+ tempRE_AUX_SCISSOR_CNTL = 0x0;
+
+ tempSE_VTE_CNTL =
+ SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
+
+ /* Vertex format (X, Y, Z, W) */
+ tempSE_VTX_FMT_0 =
+ SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
+ SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
+ tempSE_VTX_FMT_1 = 0x0;
+
+ /*
+ * Depth buffer specific enables
+ */
+ if (flags & RADEON_DEPTH) {
+ /* Enable depth buffer */
+ tempRB3D_CNTL |= RADEON_Z_ENABLE;
+ } else {
+ /* Disable depth buffer */
+ tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
+ }
+
+ /*
+ * Stencil buffer specific enables
+ */
+ if (flags & RADEON_STENCIL) {
+ tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
+ tempRB3D_STENCILREFMASK = clear->depth_mask;
+ } else {
+ tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
+ tempRB3D_STENCILREFMASK = 0x00000000;
+ }
+
+ if (flags & RADEON_USE_COMP_ZBUF) {
+ tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+ RADEON_Z_DECOMPRESSION_ENABLE;
+ }
+ if (flags & RADEON_USE_HIERZ) {
+ tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+ }
+
+ BEGIN_RING(26);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
+ OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
+ OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
+ OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+ OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
+ tempRB3D_STENCILREFMASK);
+ OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
+ OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
+ OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
+ OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
+ OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
+ OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
+ OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
+ ADVANCE_RING();
+
+ /* Make sure we restore the 3D state next time. */
+ dev_priv->sarea_priv->ctx_owner = 0;
+
+ for (i = 0; i < nbox; i++) {
+
+ /*
+ * Funny that this should be required --
+ * sets top-left?
+ */
+ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+ BEGIN_RING(14);
+ OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
+ OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+ RADEON_PRIM_WALK_RING |
+ (3 << RADEON_NUM_VERTICES_SHIFT)));
+ OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x3f800000);
+ OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x3f800000);
+ OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x3f800000);
+ ADVANCE_RING();
+ }
+ } else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+ int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+
+ rb3d_cntl = depth_clear->rb3d_cntl;
+
+ if (flags & RADEON_DEPTH) {
+ rb3d_cntl |= RADEON_Z_ENABLE;
+ } else {
+ rb3d_cntl &= ~RADEON_Z_ENABLE;
+ }
+
+ if (flags & RADEON_STENCIL) {
+ rb3d_cntl |= RADEON_STENCIL_ENABLE;
+
+ /* misnamed field */
+ rb3d_stencilrefmask = clear->depth_mask;
+
+ } else {
+ rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
+ rb3d_stencilrefmask = 0x00000000;
+ }
+
+ if (flags & RADEON_USE_COMP_ZBUF) {
+ tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+ RADEON_Z_DECOMPRESSION_ENABLE;
+ }
+ if (flags & RADEON_USE_HIERZ) {
+ tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+ }
+
+ BEGIN_RING(13);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
+ OUT_RING(0x00000000);
+ OUT_RING(rb3d_cntl);
+
+ OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+ OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
+ OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
+ OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
+ ADVANCE_RING();
+
+ /* Make sure we restore the 3D state next time. */
+ dev_priv->sarea_priv->ctx_owner = 0;
+
+ for (i = 0; i < nbox; i++) {
+
+ /*
+ * Funny that this should be required --
+ * sets top-left?
+ */
+ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+ BEGIN_RING(15);
+
+ OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
+ OUT_RING(RADEON_VTX_Z_PRESENT |
+ RADEON_VTX_PKCOLOR_PRESENT);
+ OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+ RADEON_PRIM_WALK_RING |
+ RADEON_MAOS_ENABLE |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (3 << RADEON_NUM_VERTICES_SHIFT)));
+
+ OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x0);
+
+ OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x0);
+
+ OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x0);
+
+ ADVANCE_RING();
+ }
+ }
+
+ /*
+ * Increment the clear counter. The client-side 3D driver must
+ * wait on this value before performing the clear ioctl. We
+ * need this because the card's so damned fast...
+ */
+ dev_priv->sarea_priv->last_clear++;
+
+ BEGIN_RING(4);
+
+ RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_swap(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int i;
+ RING_LOCALS;
+
+ /* Do some trivial performance monitoring... */
+ if (dev_priv->do_boxes)
+ radeon_cp_performance_boxes(dev_priv);
+
+ /*
+ * Wait for the 3D stream to idle before dispatching the bitblt.
+ * This will prevent data corruption between the two streams.
+ */
+ BEGIN_RING(2);
+
+ RADEON_WAIT_UNTIL_3D_IDLE();
+
+ ADVANCE_RING();
+
+ for (i = 0; i < nbox; i++) {
+ int x = pbox[i].x1;
+ int y = pbox[i].y1;
+ int w = pbox[i].x2 - x;
+ int h = pbox[i].y2 - y;
+
+ DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h);
+
+ BEGIN_RING(9);
+
+ OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
+ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+
+ /* Make this work even if front & back are flipped: */
+ OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
+ if (dev_priv->current_page == 0) {
+ OUT_RING(dev_priv->back_pitch_offset);
+ OUT_RING(dev_priv->front_pitch_offset);
+ } else {
+ OUT_RING(dev_priv->front_pitch_offset);
+ OUT_RING(dev_priv->back_pitch_offset);
+ }
+
+ OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
+ OUT_RING((x << 16) | y);
+ OUT_RING((x << 16) | y);
+ OUT_RING((w << 16) | h);
+
+ ADVANCE_RING();
+ }
+
+ /*
+ * Increment the frame counter. The client-side 3D driver must
+ * throttle the framerate by waiting for this value before
+ * performing the swapbuffer ioctl.
+ */
+	dev_priv->sarea_priv->last_frame++;
+
+ BEGIN_RING(4);
+
+ RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_flip(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle;
+ int offset = (dev_priv->current_page == 1)
+ ? dev_priv->front_offset : dev_priv->back_offset;
+ RING_LOCALS;
+ DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
+ __FUNCTION__,
+ dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
+
+ /* Do some trivial performance monitoring... */
+ if (dev_priv->do_boxes) {
+ dev_priv->stats.boxes |= RADEON_BOX_FLIP;
+ radeon_cp_performance_boxes(dev_priv);
+ }
+
+ /* Update the frame offsets for both CRTCs */
+ BEGIN_RING(6);
+
+ RADEON_WAIT_UNTIL_3D_IDLE();
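+	/*
+	 * (color_fmt - 2) happens to equal the bytes per pixel of the
+	 * color formats used here (RGB565 = 4, ARGB8888 = 6).
+	 */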
+ OUT_RING_REG(RADEON_CRTC_OFFSET,
+ ((sarea->frame.y * dev_priv->front_pitch +
+ sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) + offset);
+ OUT_RING_REG(RADEON_CRTC2_OFFSET,
+ dev_priv->sarea_priv->crtc2_base + offset);
+
+ ADVANCE_RING();
+
+ /*
+ * Increment the frame counter. The client-side 3D driver must
+ * throttle the framerate by waiting for this value before
+ * performing the swapbuffer ioctl.
+ */
+	dev_priv->sarea_priv->last_frame++;
+ dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
+ 1 - dev_priv->current_page;
+
+ BEGIN_RING(2);
+
+ RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
+
+ ADVANCE_RING();
+}
+
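+/*
+ * Return nonzero when the vertex count is invalid for the given
+ * primitive type; e.g. a TRI_LIST draw with nr = 7 is rejected
+ * because 7 is not a multiple of 3.
+ */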
+static int bad_prim_vertex_nr(int primitive, int nr)
+{
+ switch (primitive & RADEON_PRIM_TYPE_MASK) {
+ case RADEON_PRIM_TYPE_NONE:
+ case RADEON_PRIM_TYPE_POINT:
+ return (nr < 1);
+ case RADEON_PRIM_TYPE_LINE:
+ return ((nr & 1) || nr == 0);
+ case RADEON_PRIM_TYPE_LINE_STRIP:
+ return (nr < 2);
+ case RADEON_PRIM_TYPE_TRI_LIST:
+ case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
+ case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
+ case RADEON_PRIM_TYPE_RECT_LIST:
+ return (nr % 3 || nr == 0);
+ case RADEON_PRIM_TYPE_TRI_FAN:
+ case RADEON_PRIM_TYPE_TRI_STRIP:
+ return (nr < 3);
+ default:
+ return (1);
+ }
+}
+
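+/*
+ * Describes one TCL primitive within a vertex/index buffer: start and
+ * finish are byte offsets into the buffer, offset locates the vertex
+ * data for indexed draws, and vc_format is the vertex format.
+ */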
+typedef struct {
+ unsigned int start;
+ unsigned int finish;
+ unsigned int prim;
+ unsigned int numverts;
+ unsigned int offset;
+ unsigned int vc_format;
+} drm_radeon_tcl_prim_t;
+
+static void radeon_cp_dispatch_vertex(drm_device_t *dev,
+ drm_buf_t *buf, drm_radeon_tcl_prim_t *prim)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
+ int numverts = (int)prim->numverts;
+ int nbox = sarea_priv->nbox;
+ int i = 0;
+ RING_LOCALS;
+
+ DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
+ prim->prim, prim->vc_format, prim->start,
+ prim->finish, prim->numverts);
+
+ if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
+ DRM_ERROR("bad prim %x numverts %d\n",
+ prim->prim, prim->numverts);
+ return;
+ }
+
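+	/*
+	 * Emit the draw once per cliprect; the scissor programmed by
+	 * radeon_emit_clip_rect() restricts each pass to one box.
+	 */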
+ do {
+ /* Emit the next cliprect */
+ if (i < nbox) {
+ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+ }
+
+ /* Emit the vertex buffer rendering commands */
+ BEGIN_RING(5);
+
+ OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
+ OUT_RING(offset);
+ OUT_RING(numverts);
+ OUT_RING(prim->vc_format);
+ OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
+ RADEON_COLOR_ORDER_RGBA |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (numverts << RADEON_NUM_VERTICES_SHIFT));
+
+ ADVANCE_RING();
+
+ i++;
+ } while (i < nbox);
+}
+
+static void radeon_cp_discard_buffer(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ RING_LOCALS;
+
+ buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+
+ /* Emit the vertex buffer age */
+ BEGIN_RING(2);
+ RADEON_DISPATCH_AGE(buf_priv->age);
+ ADVANCE_RING();
+
+ buf->pending = 1;
+ buf->used = 0;
+}
+
+static void radeon_cp_dispatch_indirect(drm_device_t *dev,
+ drm_buf_t *buf, int start, int end)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+ DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+
+ if (start != end) {
+ int offset = (dev_priv->gart_buffers_offset +
+ buf->offset + start);
+ int dwords = (end - start + 3) / sizeof (u32);
+
+ /*
+ * Indirect buffer data must be an even number of
+ * dwords, so if we've been given an odd number we must
+ * pad the data with a Type-2 CP packet.
+ */
+ if (dwords & 1) {
+ u32 *data = (u32 *)(uintptr_t)
+ ((char *)dev->agp_buffer_map->handle
+ + buf->offset + start);
+ data[dwords++] = RADEON_CP_PACKET2;
+ }
+
+ /* Fire off the indirect buffer */
+ BEGIN_RING(3);
+
+ OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
+ OUT_RING(offset);
+ OUT_RING(dwords);
+
+ ADVANCE_RING();
+ }
+}
+
+static void radeon_cp_dispatch_indices(drm_device_t *dev,
+ drm_buf_t *elt_buf, drm_radeon_tcl_prim_t *prim)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int offset = dev_priv->gart_buffers_offset + prim->offset;
+ u32 *data;
+ int dwords;
+ int i = 0;
+ int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
+ int count = (prim->finish - start) / sizeof (u16);
+ int nbox = sarea_priv->nbox;
+
+ DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
+ prim->prim, prim->vc_format, prim->start,
+ prim->finish, prim->offset, prim->numverts);
+
+ if (bad_prim_vertex_nr(prim->prim, count)) {
+ DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
+ return;
+ }
+
+ if (start >= prim->finish || (prim->start & 0x7)) {
+ DRM_ERROR("buffer prim %d\n", prim->prim);
+ return;
+ }
+
+ dwords = (prim->finish - prim->start + 3) / sizeof (u32);
+
+ data = (u32 *)(uintptr_t)((char *)dev->agp_buffer_map->handle +
+ elt_buf->offset + prim->start);
+
+ data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
+ data[1] = offset;
+ data[2] = prim->numverts;
+ data[3] = prim->vc_format;
+ data[4] = (prim->prim |
+ RADEON_PRIM_WALK_IND |
+ RADEON_COLOR_ORDER_RGBA |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (count << RADEON_NUM_VERTICES_SHIFT));
+
+ do {
+ if (i < nbox)
+ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+ radeon_cp_dispatch_indirect(dev, elt_buf,
+ prim->start, prim->finish);
+
+ i++;
+ } while (i < nbox);
+}
+
+#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
+
+/*ARGSUSED*/
+static int radeon_cp_dispatch_texture(drm_file_t *fpriv,
+ drm_device_t *dev, drm_radeon_texture_t *tex,
+ drm_radeon_tex_image_t *image, int mode)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_buf_t *buf;
+ u32 format;
+ u32 *buffer;
+ const u8 __user *data;
+ int size, dwords, tex_width, blit_width, spitch;
+ u32 height;
+ int i;
+ u32 texpitch, microtile;
+ u32 offset;
+ RING_LOCALS;
+
+ if (radeon_check_and_fixup_offset(dev_priv, fpriv, &tex->offset)) {
+ DRM_ERROR("Invalid destination offset\n");
+ return (EINVAL);
+ }
+
+ dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
+
+ /*
+ * Flush the pixel cache. This ensures no pixel data gets mixed
+ * up with the texture data from the host data blit, otherwise
+ * part of the texture image may be corrupted.
+ */
+ BEGIN_RING(4);
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+
+ /*
+ * The compiler won't optimize away a division by a variable,
+ * even if the only legal values are powers of two. Thus, we'll
+ * use a shift instead.
+ */
+ switch (tex->format) {
+ case RADEON_TXFORMAT_ARGB8888:
+ case RADEON_TXFORMAT_RGBA8888:
+ format = RADEON_COLOR_FORMAT_ARGB8888;
+ tex_width = tex->width * 4;
+ blit_width = image->width * 4;
+ break;
+ case RADEON_TXFORMAT_AI88:
+ case RADEON_TXFORMAT_ARGB1555:
+ case RADEON_TXFORMAT_RGB565:
+ case RADEON_TXFORMAT_ARGB4444:
+ case RADEON_TXFORMAT_VYUY422:
+ case RADEON_TXFORMAT_YVYU422:
+ format = RADEON_COLOR_FORMAT_RGB565;
+ tex_width = tex->width * 2;
+ blit_width = image->width * 2;
+ break;
+ case RADEON_TXFORMAT_I8:
+ case RADEON_TXFORMAT_RGB332:
+ format = RADEON_COLOR_FORMAT_CI8;
+ tex_width = tex->width * 1;
+ blit_width = image->width * 1;
+ break;
+ default:
+ DRM_ERROR("invalid texture format %d\n", tex->format);
+ return (EINVAL);
+ }
+ spitch = blit_width >> 6;
+ if (spitch == 0 && image->height > 1)
+ return (EINVAL);
+
+ texpitch = tex->pitch;
+ if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
+ microtile = 1;
+ if (tex_width < 64) {
+ texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
+ /* we got tiled coordinates, untile them */
+ image->x *= 2;
+ }
+ } else
+ microtile = 0;
+
+ DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
+
+ do {
+ DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+ tex->offset >> 10, tex->pitch, tex->format,
+ image->x, image->y, image->width, image->height);
+
+ /*
+ * Make a copy of some parameters in case we have to
+ * update them for a multi-pass texture blit.
+ */
+ height = image->height;
+ data = (const u8 __user *)image->data;
+
+ size = height * blit_width;
+
+ if (size > RADEON_MAX_TEXTURE_SIZE) {
+ height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+ size = height * blit_width;
+ } else if (size < 4 && size > 0) {
+ size = 4;
+ } else if (size == 0) {
+ return (0);
+ }
+
+ buf = radeon_freelist_get(dev);
+#if 0
+ if (0 && !buf) {
+ radeon_do_cp_idle(dev_priv);
+ buf = radeon_freelist_get(dev);
+ }
+#endif
+ if (!buf) {
+ DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) ==
+ DDI_MODEL_ILP32) {
+ drm_radeon_tex_image_32_t image32;
+ image32.x = image->x;
+ image32.y = image->y;
+ image32.width = image->width;
+ image32.height = image->height;
+ image32.data = (uint32_t)(uintptr_t)image->data;
+ DRM_COPYTO_WITH_RETURN(tex->image, &image32,
+ sizeof (image32));
+ } else {
+#endif
+ DRM_COPYTO_WITH_RETURN(tex->image, image,
+ sizeof (*image));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ return (EAGAIN);
+ }
+
+ /*
+ * Dispatch the indirect buffer.
+ */
+ buffer = (u32 *)(uintptr_t)
+ ((char *)dev->agp_buffer_map->handle + buf->offset);
+
+ dwords = size / 4;
+
+#define RADEON_COPY_MT(_buf, _data, _width) \
+ do { \
+ if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+ DRM_ERROR("%d: EFAULT on pad, %d bytes\n", \
+ __LINE__, (_width)); \
+ return (EFAULT); \
+ } \
+ } while (*"\0")
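+/*
+ * (*"\0" evaluates to 0, so RADEON_COPY_MT is the usual
+ * do { } while (0) macro idiom.)
+ */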
+
+ if (microtile) {
+			/*
+			 * Texture micro tiling is in use, so the minimum
+			 * texture width is 16 bytes. However, we cannot use
+			 * the blitter directly for texture widths < 64 bytes,
+			 * since the minimum tex pitch is 64 bytes and we need
+			 * it to match the texture width, otherwise the
+			 * blitter will tile it wrong. Thus, tile manually in
+			 * this case. Additionally, we need to special-case
+			 * tex height = 1, since our actual image will have
+			 * height 2 and we must ensure we don't read beyond
+			 * the texture size from user space.
+			 */
+ if (tex->height == 1) {
+ if (tex_width >= 64 || tex_width <= 16) {
+ RADEON_COPY_MT(buffer, data,
+ (int)(tex_width * sizeof (u32)));
+ } else if (tex_width == 32) {
+ RADEON_COPY_MT(buffer, data, 16);
+ RADEON_COPY_MT(buffer + 8,
+ data + 16, 16);
+ }
+ } else if (tex_width >= 64 || tex_width == 16) {
+ RADEON_COPY_MT(buffer, data,
+ (int)(dwords * sizeof (u32)));
+ } else if (tex_width < 16) {
+ for (i = 0; i < tex->height; i++) {
+ RADEON_COPY_MT(buffer, data, tex_width);
+ buffer += 4;
+ data += tex_width;
+ }
+ } else if (tex_width == 32) {
+ /*
+ * TODO: make sure this works when not
+ * fitting in one buffer
+ * (i.e. 32bytes x 2048...)
+ */
+ for (i = 0; i < tex->height; i += 2) {
+ RADEON_COPY_MT(buffer, data, 16);
+ data += 16;
+ RADEON_COPY_MT(buffer + 8, data, 16);
+ data += 16;
+ RADEON_COPY_MT(buffer + 4, data, 16);
+ data += 16;
+ RADEON_COPY_MT(buffer + 12, data, 16);
+ data += 16;
+ buffer += 16;
+ }
+ }
+ } else {
+ if (tex_width >= 32) {
+ /*
+ * Texture image width is larger than the
+ * minimum, so we can upload it directly.
+ */
+ RADEON_COPY_MT(buffer, data,
+ (int)(dwords * sizeof (u32)));
+ } else {
+ /*
+ * Texture image width is less than the minimum,
+ * so we need to pad out each image scanline to
+ * the minimum width.
+ */
+ for (i = 0; i < tex->height; i++) {
+ RADEON_COPY_MT(buffer, data, tex_width);
+ buffer += 8;
+ data += tex_width;
+ }
+ }
+ }
+
+#undef RADEON_COPY_MT
+ buf->filp = fpriv;
+ buf->used = size;
+ offset = dev_priv->gart_buffers_offset + buf->offset;
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+ OUT_RING((spitch << 22) | (offset >> 10));
+ OUT_RING((texpitch << 22) | (tex->offset >> 10));
+ OUT_RING(0);
+ OUT_RING((image->x << 16) | image->y);
+ OUT_RING((image->width << 16) | height);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ radeon_cp_discard_buffer(dev, buf);
+
+ /* Update the input parameters for next time */
+ image->y += height;
+ image->height -= height;
+ image->data = (const u8 __user *)image->data + size;
+ } while (image->height > 0);
+
+ /*
+ * Flush the pixel cache after the blit completes. This ensures
+ * the texture data is written out to memory before rendering
+ * continues.
+ */
+ BEGIN_RING(4);
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+ COMMIT_RING();
+ return (0);
+}
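+
+/*
+ * Hypothetical user-space sketch (not part of this driver): since the
+ * EAGAIN path above copies the partially-updated image descriptor back
+ * out, a client is expected to re-issue the texture ioctl with the
+ * same arguments until the multi-pass upload completes.  Assuming the
+ * DRM_IOCTL_RADEON_TEXTURE request code from radeon_drm.h:
+ *
+ *	drm_radeon_texture_t tex;	offset/pitch/format/width/height
+ *	drm_radeon_tex_image_t img;	x/y/width/height/data
+ *	int r;
+ *
+ *	tex.image = &img;
+ *	do {
+ *		r = ioctl(fd, DRM_IOCTL_RADEON_TEXTURE, &tex);
+ *	} while (r == -1 && errno == EAGAIN);
+ */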
+
+static void radeon_cp_dispatch_stipple(drm_device_t *dev, u32 *stipple)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int i;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(35);
+
+ OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
+ OUT_RING(0x00000000);
+
+ OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
+ for (i = 0; i < 32; i++) {
+ OUT_RING(stipple[i]);
+ }
+
+ ADVANCE_RING();
+}
+
+static void radeon_apply_surface_regs(int surf_index,
+ drm_radeon_private_t *dev_priv)
+{
+ if (!dev_priv->mmio)
+ return;
+
+ (void) radeon_do_cp_idle(dev_priv);
+
+ RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
+ dev_priv->surfaces[surf_index].flags);
+ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
+ dev_priv->surfaces[surf_index].lower);
+ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
+ dev_priv->surfaces[surf_index].upper);
+}
+
+/*
+ * Allocates a virtual surface
+ * doesn't always allocate a real surface, will stretch an existing
+ * surface when possible.
+ *
+ * Note that refcount can be at most 2, since during a free refcount=3
+ * might mean we have to allocate a new surface which might not always
+ * be available.
+ * For example: we allocate three contiguous surfaces ABC. If B is
+ * freed, we suddenly need two surfaces to store A and C, which might
+ * not always be available.
+ */
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+ drm_radeon_private_t *dev_priv, drm_file_t *filp)
+{
+ struct radeon_virt_surface *s;
+ int i;
+ int virt_surface_index;
+ uint32_t new_upper, new_lower;
+
+ new_lower = new->address;
+ new_upper = new_lower + new->size - 1;
+
+ /* sanity check */
+ if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
+ ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
+ RADEON_SURF_ADDRESS_FIXED_MASK) ||
+ ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
+ return (-1);
+
+ /* make sure there is no overlap with existing surfaces */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ if ((dev_priv->surfaces[i].refcount != 0) &&
+ (((new_lower >= dev_priv->surfaces[i].lower) &&
+ (new_lower < dev_priv->surfaces[i].upper)) ||
+ ((new_lower < dev_priv->surfaces[i].lower) &&
+ (new_upper > dev_priv->surfaces[i].lower)))) {
+ return (-1);
+ }
+ }
+
+ /* find a virtual surface */
+ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
+ if (dev_priv->virt_surfaces[i].filp == 0)
+ break;
+ if (i == 2 * RADEON_MAX_SURFACES) {
+ return (-1);
+ }
+ virt_surface_index = i;
+
+ /* try to reuse an existing surface */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ /* extend before */
+ if ((dev_priv->surfaces[i].refcount == 1) &&
+ (new->flags == dev_priv->surfaces[i].flags) &&
+ (new_upper + 1 == dev_priv->surfaces[i].lower)) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount++;
+ dev_priv->surfaces[i].lower = s->lower;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return (virt_surface_index);
+ }
+
+ /* extend after */
+ if ((dev_priv->surfaces[i].refcount == 1) &&
+ (new->flags == dev_priv->surfaces[i].flags) &&
+ (new_lower == dev_priv->surfaces[i].upper + 1)) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount++;
+ dev_priv->surfaces[i].upper = s->upper;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return (virt_surface_index);
+ }
+ }
+
+ /* okay, we need a new one */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ if (dev_priv->surfaces[i].refcount == 0) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount = 1;
+ dev_priv->surfaces[i].lower = s->lower;
+ dev_priv->surfaces[i].upper = s->upper;
+ dev_priv->surfaces[i].flags = s->flags;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return (virt_surface_index);
+ }
+ }
+
+ /* we didn't find anything */
+ return (-1);
+}
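+
+/*
+ * Worked example of the stretch logic above: with one real surface
+ * [lower, upper] live at refcount 1, a request with identical flags
+ * for the range starting at upper + 1 takes the "extend after" branch:
+ * the real surface's upper bound grows, its refcount becomes 2, and
+ * the new virtual surface records only the caller's own sub-range so
+ * that a later free_surface() can shrink the real one back.
+ */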
+
+static int
+free_surface(drm_file_t *filp, drm_radeon_private_t *dev_priv, int lower)
+{
+ struct radeon_virt_surface *s;
+ int i;
+
+ /* find the virtual surface */
+ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+ s = &(dev_priv->virt_surfaces[i]);
+ if (s->filp) {
+ if ((lower == s->lower) && (filp == s->filp)) {
+ if (dev_priv->surfaces[s->surface_index].
+ lower == s->lower)
+ dev_priv->surfaces[s->surface_index].
+ lower = s->upper;
+
+ if (dev_priv->surfaces[s->surface_index].
+ upper == s->upper)
+ dev_priv->surfaces[s->surface_index].
+ upper = s->lower;
+
+ dev_priv->surfaces[s->surface_index].refcount--;
+ if (dev_priv->surfaces[s->surface_index].
+ refcount == 0)
+ dev_priv->surfaces[s->surface_index].
+ flags = 0;
+ s->filp = NULL;
+ radeon_apply_surface_regs(s->surface_index,
+ dev_priv);
+ return (0);
+ }
+ }
+ }
+
+ return (1);
+}
+
+static void radeon_surfaces_release(drm_file_t *filp,
+ drm_radeon_private_t *dev_priv)
+{
+ int i;
+
+ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+ if (dev_priv->virt_surfaces[i].filp == filp)
+ (void) free_surface(filp, dev_priv,
+ dev_priv->virt_surfaces[i].lower);
+ }
+}
+
+/*
+ * IOCTL functions
+ */
+/*ARGSUSED*/
+static int radeon_surface_alloc(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_surface_alloc_t alloc;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&alloc, (void *)data, sizeof (alloc));
+
+ if (alloc_surface(&alloc, dev_priv, fpriv) == -1)
+ return (EINVAL);
+ else
+ return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_surface_free(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_surface_free_t memfree;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&memfree, (void *)data, sizeof (memfree));
+ if (free_surface(fpriv, dev_priv, memfree.address)) {
+	if (free_surface(fpriv, dev_priv, memfree.address))
+		return (EINVAL);
+	else
+		return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_clear(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_radeon_clear_t clear;
+ drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_clear_32_t clear32;
+ DRM_COPYFROM_WITH_RETURN(&clear32, (void *)data,
+ sizeof (clear32));
+ clear.flags = clear32.flags;
+ clear.clear_color = clear32.clear_color;
+ clear.clear_depth = clear32.clear_depth;
+ clear.color_mask = clear32.color_mask;
+ clear.depth_mask = clear32.depth_mask;
+		clear.depth_boxes = (void *)(uintptr_t)clear32.depth_boxes;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&clear, (void *)data, sizeof (clear));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+ if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes,
+ sarea_priv->nbox * sizeof (depth_boxes[0])))
+ return (EFAULT);
+
+ radeon_cp_dispatch_clear(dev, &clear, depth_boxes);
+
+ COMMIT_RING();
+ return (0);
+}
+
+/*
+ * Not sure why this isn't set all the time:
+ */
+static int radeon_do_init_pageflip(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ BEGIN_RING(6);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
+ OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
+ RADEON_CRTC_OFFSET_FLIP_CNTL);
+ OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
+ OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
+ RADEON_CRTC_OFFSET_FLIP_CNTL);
+ ADVANCE_RING();
+
+ dev_priv->page_flipping = 1;
+ dev_priv->current_page = 0;
+ dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
+
+ return (0);
+}
+
+/*
+ * Called whenever a client dies, from drm_release.
+ * NOTE: Lock isn't necessarily held when this is called!
+ */
+static int radeon_do_cleanup_pageflip(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->current_page != 0)
+ radeon_cp_dispatch_flip(dev);
+
+ dev_priv->page_flipping = 0;
+ return (0);
+}
+
+/*
+ * Swapping and flipping are different operations, need different ioctls.
+ * They can & should be intermixed to support multiple 3d windows.
+ */
+/*ARGSUSED*/
+static int radeon_cp_flip(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+ if (!dev_priv->page_flipping)
+ (void) radeon_do_init_pageflip(dev);
+
+ radeon_cp_dispatch_flip(dev);
+
+ COMMIT_RING();
+ return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_swap(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+ radeon_cp_dispatch_swap(dev);
+ dev_priv->sarea_priv->ctx_owner = 0;
+
+ COMMIT_RING();
+ return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_vertex(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_vertex_t vertex;
+ drm_radeon_tcl_prim_t prim;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ sarea_priv = dev_priv->sarea_priv;
+
+ DRM_COPYFROM_WITH_RETURN(&vertex, (void *)data, sizeof (vertex));
+
+ DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
+ DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);
+
+ if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ vertex.idx, dma->buf_count - 1);
+ return (EINVAL);
+ }
+ if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+ DRM_ERROR("buffer prim %d\n", vertex.prim);
+ return (EINVAL);
+ }
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ buf = dma->buflist[vertex.idx];
+
+ if (buf->filp != fpriv) {
+ DRM_ERROR("process %d using buffer owned by %p\n",
+ DRM_CURRENTPID, buf->filp);
+ return (EINVAL);
+ }
+ if (buf->pending) {
+ DRM_ERROR("sending pending buffer %d\n", vertex.idx);
+ return (EINVAL);
+ }
+
+ /*
+ * Build up a prim_t record:
+ */
+ if (vertex.count) {
+ buf->used = vertex.count; /* not used? */
+
+ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+ if (radeon_emit_state(dev_priv, fpriv,
+ &sarea_priv->context_state,
+ sarea_priv->tex_state,
+ sarea_priv->dirty)) {
+ DRM_ERROR("radeon_emit_state failed\n");
+ return (EINVAL);
+ }
+
+ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+ RADEON_UPLOAD_TEX1IMAGES |
+ RADEON_UPLOAD_TEX2IMAGES |
+ RADEON_REQUIRE_QUIESCENCE);
+ }
+
+ prim.start = 0;
+ prim.finish = vertex.count; /* unused */
+ prim.prim = vertex.prim;
+ prim.numverts = vertex.count;
+ prim.vc_format = dev_priv->sarea_priv->vc_format;
+
+ radeon_cp_dispatch_vertex(dev, buf, &prim);
+ }
+
+ if (vertex.discard) {
+ radeon_cp_discard_buffer(dev, buf);
+ }
+
+ COMMIT_RING();
+ return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_indices(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_indices_t elts;
+ drm_radeon_tcl_prim_t prim;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+ sarea_priv = dev_priv->sarea_priv;
+
+ DRM_COPYFROM_WITH_RETURN(&elts, (void *)data, sizeof (elts));
+
+ DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
+ DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard);
+
+ if (elts.idx < 0 || elts.idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ elts.idx, dma->buf_count - 1);
+ return (EINVAL);
+ }
+ if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+ DRM_ERROR("buffer prim %d\n", elts.prim);
+ return (EINVAL);
+ }
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ buf = dma->buflist[elts.idx];
+
+ if (buf->filp != fpriv) {
+ DRM_ERROR("process %d using buffer owned by %p\n",
+ DRM_CURRENTPID, buf->filp);
+ return (EINVAL);
+ }
+ if (buf->pending) {
+ DRM_ERROR("sending pending buffer %d\n", elts.idx);
+ return (EINVAL);
+ }
+
+ elts.start -= RADEON_INDEX_PRIM_OFFSET;
+
+ if (elts.start & 0x7) {
+ DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
+ return (EINVAL);
+ }
+ if (elts.start < buf->used) {
+ DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
+ return (EINVAL);
+ }
+
+ buf->used = elts.end;
+
+ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+ if (radeon_emit_state(dev_priv, fpriv,
+ &sarea_priv->context_state,
+ sarea_priv->tex_state,
+ sarea_priv->dirty)) {
+ DRM_ERROR("radeon_emit_state failed\n");
+ return (EINVAL);
+ }
+
+ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+ RADEON_UPLOAD_TEX1IMAGES |
+ RADEON_UPLOAD_TEX2IMAGES |
+ RADEON_REQUIRE_QUIESCENCE);
+ }
+
+ /*
+ * Build up a prim_t record:
+ */
+ prim.start = elts.start;
+ prim.finish = elts.end;
+ prim.prim = elts.prim;
+ prim.offset = 0; /* offset from start of dma buffers */
+ prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
+ prim.vc_format = dev_priv->sarea_priv->vc_format;
+
+ radeon_cp_dispatch_indices(dev, buf, &prim);
+ if (elts.discard) {
+ radeon_cp_discard_buffer(dev, buf);
+ }
+
+ COMMIT_RING();
+ return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_texture(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_texture_t tex;
+ drm_radeon_tex_image_t image;
+ int ret;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_texture_32_t tex32;
+ drm_radeon_tex_image_32_t image32;
+
+ DRM_COPYFROM_WITH_RETURN(&tex32, (void *)data, sizeof (tex32));
+ if (tex32.image == 0) {
+ DRM_ERROR("null texture image!\n");
+ return (EINVAL);
+ }
+ if (DRM_COPY_FROM_USER(&image32,
+ (void *)(uintptr_t)tex32.image, sizeof (image32))) {
+ cmn_err(CE_WARN, "copyin32 failed");
+ return (EFAULT);
+ }
+
+ tex.offset = tex32.offset;
+ tex.pitch = tex32.pitch;
+ tex.format = tex32.format;
+ tex.width = tex32.width;
+ tex.height = tex32.height;
+		tex.image = (void *)(uintptr_t)tex32.image;
+
+ image.x = image32.x;
+ image.y = image32.y;
+ image.width = image32.width;
+ image.height = image32.height;
+		image.data = (void *)(uintptr_t)image32.data;
+
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&tex, (void *)data, sizeof (tex));
+ if (tex.image == NULL) {
+ return (EINVAL);
+ }
+ if (DRM_COPY_FROM_USER(&image,
+ (drm_radeon_tex_image_t *)tex.image, sizeof (image))) {
+ return (EFAULT);
+ }
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ ret = radeon_cp_dispatch_texture(fpriv, dev, &tex, &image, mode);
+
+ COMMIT_RING();
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_stipple(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_stipple_t stipple;
+ u32 mask[32];
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_stipple_32_t stipple32;
+ DRM_COPYFROM_WITH_RETURN(&stipple32, (void *)data,
+ sizeof (stipple32));
+ stipple.mask = (void *)(uintptr_t)stipple32.mask;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&stipple, (void *)data,
+ sizeof (stipple));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof (u32)))
+ return (EFAULT);
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+ radeon_cp_dispatch_stipple(dev, mask);
+
+ COMMIT_RING();
+ return (0);
+}
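+
+/*
+ * Hypothetical usage sketch (not part of this driver): the stipple
+ * mask is a 32x32 bit pattern passed as 32 u32 scanlines.  Assuming
+ * the DRM_IOCTL_RADEON_STIPPLE request code from radeon_drm.h:
+ *
+ *	u32 mask[32];
+ *	drm_radeon_stipple_t st;
+ *	int i;
+ *
+ *	for (i = 0; i < 32; i++)
+ *		mask[i] = 0xaaaaaaaa;	50% screen-door pattern
+ *	st.mask = mask;
+ *	(void) ioctl(fd, DRM_IOCTL_RADEON_STIPPLE, &st);
+ */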
+
+/*ARGSUSED*/
+static int radeon_cp_indirect(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_indirect_t indirect;
+ RING_LOCALS;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ DRM_COPYFROM_WITH_RETURN(&indirect, (void *) data, sizeof (indirect));
+
+ DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
+ indirect.idx, indirect.start, indirect.end, indirect.discard);
+
+ if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ indirect.idx, dma->buf_count - 1);
+ return (EINVAL);
+ }
+
+ buf = dma->buflist[indirect.idx];
+
+ if (buf->filp != fpriv) {
+ DRM_ERROR("process %d using buffer owned by %p\n",
+ DRM_CURRENTPID, buf->filp);
+ return (EINVAL);
+ }
+ if (buf->pending) {
+ DRM_ERROR("sending pending buffer %d\n", indirect.idx);
+ return (EINVAL);
+ }
+
+ if (indirect.start < buf->used) {
+ DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
+ indirect.start, buf->used);
+ return (EINVAL);
+ }
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ buf->used = indirect.end;
+
+ /*
+ * Wait for the 3D stream to idle before the indirect buffer
+ * containing 2D acceleration commands is processed.
+ */
+ BEGIN_RING(2);
+
+ RADEON_WAIT_UNTIL_3D_IDLE();
+
+ ADVANCE_RING();
+
+ /*
+ * Dispatch the indirect buffer full of commands from the
+ * X server. This is insecure and is thus only available to
+ * privileged clients.
+ */
+ radeon_cp_dispatch_indirect(dev, buf, indirect.start, indirect.end);
+ if (indirect.discard) {
+ radeon_cp_discard_buffer(dev, buf);
+ }
+
+ COMMIT_RING();
+ return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_sarea_t *sarea_priv;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_radeon_vertex2_t vertex;
+ int i;
+ unsigned char laststate;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+ sarea_priv = dev_priv->sarea_priv;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_vertex2_32_t vertex32;
+
+ DRM_COPYFROM_WITH_RETURN(&vertex32, (void *) data,
+ sizeof (vertex32));
+ vertex.idx = vertex32.idx;
+ vertex.discard = vertex32.discard;
+ vertex.nr_states = vertex32.nr_states;
+ vertex.state = (void *) (uintptr_t)vertex32.state;
+ vertex.nr_prims = vertex32.nr_prims;
+ vertex.prim = (void *)(uintptr_t)vertex32.prim;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&vertex, (void *) data,
+ sizeof (vertex));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+
+ DRM_DEBUG("pid=%d index=%d discard=%d\n",
+ DRM_CURRENTPID, vertex.idx, vertex.discard);
+
+ if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ vertex.idx, dma->buf_count - 1);
+ return (EINVAL);
+ }
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ buf = dma->buflist[vertex.idx];
+
+ if (buf->filp != fpriv) {
+ DRM_ERROR("process %d using buffer owned by %p\n",
+ DRM_CURRENTPID, buf->filp);
+ return (EINVAL);
+ }
+
+ if (buf->pending) {
+ DRM_ERROR("sending pending buffer %d\n", vertex.idx);
+ return (EINVAL);
+ }
+
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ return (EINVAL);
+
+ for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) {
+ drm_radeon_prim_t prim;
+ drm_radeon_tcl_prim_t tclprim;
+
+ if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof (prim)))
+ return (EFAULT);
+
+ if (prim.stateidx != laststate) {
+ drm_radeon_state_t state;
+
+ if (DRM_COPY_FROM_USER(&state,
+ &vertex.state[prim.stateidx], sizeof (state)))
+ return (EFAULT);
+
+ if (radeon_emit_state2(dev_priv, fpriv, &state)) {
+ DRM_ERROR("radeon_emit_state2 failed\n");
+ return (EINVAL);
+ }
+
+ laststate = prim.stateidx;
+ }
+
+ tclprim.start = prim.start;
+ tclprim.finish = prim.finish;
+ tclprim.prim = prim.prim;
+ tclprim.vc_format = prim.vc_format;
+
+ if (prim.prim & RADEON_PRIM_WALK_IND) {
+ tclprim.offset = prim.numverts * 64;
+ tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
+
+ radeon_cp_dispatch_indices(dev, buf, &tclprim);
+ } else {
+ tclprim.numverts = prim.numverts;
+ tclprim.offset = 0; /* not used */
+
+ radeon_cp_dispatch_vertex(dev, buf, &tclprim);
+ }
+
+ if (sarea_priv->nbox == 1)
+ sarea_priv->nbox = 0;
+ }
+
+ if (vertex.discard) {
+ radeon_cp_discard_buffer(dev, buf);
+ }
+
+ COMMIT_RING();
+ return (0);
+}
+
+static int radeon_emit_packets(drm_radeon_private_t *dev_priv,
+ drm_file_t *filp_priv, drm_radeon_cmd_header_t header,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int id = (int)header.packet.packet_id;
+ int sz, reg;
+ u32 *data = (u32 *)(uintptr_t)cmdbuf->buf;
+ RING_LOCALS;
+
+ if (id >= RADEON_MAX_STATE_PACKETS)
+ return (EINVAL);
+
+ sz = packet[id].len;
+ reg = packet[id].start;
+
+ if (sz * sizeof (int) > cmdbuf->bufsz) {
+ DRM_ERROR("Packet size provided larger than data provided\n");
+ return (EINVAL);
+ }
+
+ if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) {
+ DRM_ERROR("Packet verification failed\n");
+ return (EINVAL);
+ }
+
+ BEGIN_RING(sz + 1);
+ OUT_RING(CP_PACKET0(reg, (sz - 1)));
+ OUT_RING_TABLE(data, sz);
+ ADVANCE_RING();
+
+ cmdbuf->buf += sz * sizeof (int);
+ cmdbuf->bufsz -= sz * sizeof (int);
+ return (0);
+}
+
+static inline int
+radeon_emit_scalars(drm_radeon_private_t *dev_priv,
+ drm_radeon_cmd_header_t header, drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int sz = header.scalars.count;
+ int start = header.scalars.offset;
+ int stride = header.scalars.stride;
+ RING_LOCALS;
+
+ BEGIN_RING(3 + sz);
+ OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+ OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+ OUT_RING_TABLE(cmdbuf->buf, sz);
+ ADVANCE_RING();
+ cmdbuf->buf += sz * sizeof (int);
+ cmdbuf->bufsz -= sz * sizeof (int);
+ return (0);
+}
+
+/*
+ * God this is ugly
+ */
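+/*
+ * (Best guess at the ugliness: the command header's scalars.offset
+ * field is only eight bits wide, so SCALARS2 re-uses the same layout
+ * and biases the start offset by 0x100 to reach scalar registers
+ * above 0xff.)
+ */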
+static inline int
+radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
+ drm_radeon_cmd_header_t header, drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int sz = header.scalars.count;
+ int start = ((unsigned int)header.scalars.offset) + 0x100;
+ int stride = header.scalars.stride;
+ RING_LOCALS;
+
+ BEGIN_RING(3 + sz);
+ OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+ OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+ OUT_RING_TABLE(cmdbuf->buf, sz);
+ ADVANCE_RING();
+ cmdbuf->buf += sz * sizeof (int);
+ cmdbuf->bufsz -= sz * sizeof (int);
+ return (0);
+}
+
+static inline int
+radeon_emit_vectors(drm_radeon_private_t *dev_priv,
+ drm_radeon_cmd_header_t header, drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int sz = header.vectors.count;
+ int start = header.vectors.offset;
+ int stride = header.vectors.stride;
+ RING_LOCALS;
+
+ BEGIN_RING(5 + sz);
+ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+ OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+ OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+ OUT_RING_TABLE(cmdbuf->buf, sz);
+ ADVANCE_RING();
+
+ cmdbuf->buf += sz * sizeof (int);
+ cmdbuf->bufsz -= sz * sizeof (int);
+ return (0);
+}
+
+static inline int
+radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
+ drm_radeon_cmd_header_t header, drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int sz = header.veclinear.count * 4;
+ int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
+ RING_LOCALS;
+
+ if (!sz)
+ return (0);
+ if (sz * 4 > cmdbuf->bufsz)
+ return (EINVAL);
+
+ BEGIN_RING(5 + sz);
+ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+ OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+ OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+ OUT_RING_TABLE(cmdbuf->buf, sz);
+ ADVANCE_RING();
+
+ cmdbuf->buf += sz * sizeof (int);
+ cmdbuf->bufsz -= sz * sizeof (int);
+ return (0);
+}
+
+static int
+radeon_emit_packet3(drm_device_t *dev, drm_file_t *filp_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ unsigned int cmdsz;
+ int ret;
+ RING_LOCALS;
+
+ if ((ret = radeon_check_and_fixup_packet3(dev_priv,
+ filp_priv, cmdbuf, &cmdsz))) {
+ DRM_ERROR("Packet verification failed\n");
+ return (ret);
+ }
+
+ BEGIN_RING(cmdsz);
+ OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+ ADVANCE_RING();
+
+ cmdbuf->buf += cmdsz * 4;
+ cmdbuf->bufsz -= cmdsz * 4;
+ return (0);
+}
+
+static int radeon_emit_packet3_cliprect(drm_device_t *dev,
+ drm_file_t *filp_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ int orig_nbox)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_clip_rect_t box;
+ unsigned int cmdsz;
+ int ret;
+ drm_clip_rect_t __user *boxes = cmdbuf->boxes;
+ int i = 0;
+ RING_LOCALS;
+
+ if ((ret = radeon_check_and_fixup_packet3(dev_priv,
+ filp_priv, cmdbuf, &cmdsz))) {
+ DRM_ERROR("Packet verification failed\n");
+ return (ret);
+ }
+
+ if (!orig_nbox)
+ goto out;
+
+ do {
+ if (i < cmdbuf->nbox) {
+ if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof (box)))
+ return (EFAULT);
+ /*
+ * FIXME The second and subsequent times round
+ * this loop, send a WAIT_UNTIL_3D_IDLE before
+ * calling emit_clip_rect(). This fixes a
+ * lockup on fast machines when sending
+ * several cliprects with a cmdbuf, as when
+ * waving a 2D window over a 3D
+ * window. Something in the commands from user
+ * space seems to hang the card when they're
+ * sent several times in a row. That would be
+ * the correct place to fix it but this works
+ * around it until I can figure that out - Tim
+ * Smith
+ */
+ if (i) {
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ ADVANCE_RING();
+ }
+ radeon_emit_clip_rect(dev_priv, &box);
+ }
+
+ BEGIN_RING(cmdsz);
+ OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+ ADVANCE_RING();
+
+ } while (++i < cmdbuf->nbox);
+ if (cmdbuf->nbox == 1)
+ cmdbuf->nbox = 0;
+
+out:
+ cmdbuf->buf += cmdsz * 4;
+ cmdbuf->bufsz -= cmdsz * 4;
+ return (0);
+}
+
+static int
+radeon_emit_wait(drm_device_t *dev, int flags)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
+ switch (flags) {
+ case RADEON_WAIT_2D:
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+ break;
+ case RADEON_WAIT_3D:
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ ADVANCE_RING();
+ break;
+ case RADEON_WAIT_2D | RADEON_WAIT_3D:
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf = NULL;
+ int idx;
+ drm_radeon_kcmd_buffer_t cmdbuf;
+ drm_radeon_cmd_header_t header;
+ int orig_nbox, orig_bufsz;
+ char *kbuf = NULL;
+
+ LOCK_TEST_WITH_RETURN(dev, fpriv);
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_kcmd_buffer_32_t cmdbuf32;
+
+ DRM_COPYFROM_WITH_RETURN(&cmdbuf32, (void *)data,
+ sizeof (cmdbuf32));
+ cmdbuf.bufsz = cmdbuf32.bufsz;
+ cmdbuf.buf = (void *)(uintptr_t)cmdbuf32.buf;
+ cmdbuf.nbox = cmdbuf32.nbox;
+ cmdbuf.boxes = (void *)(uintptr_t)cmdbuf32.boxes;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&cmdbuf, (void *) data,
+ sizeof (cmdbuf));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) {
+ return (EINVAL);
+ }
+
+ /*
+ * Allocate an in-kernel area and copy in the cmdbuf. Do this
+ * to avoid races between checking values and using those values
+ * in other code, and simply to avoid a lot of function calls
+ * to copy in data.
+ */
+ orig_bufsz = cmdbuf.bufsz;
+ if (orig_bufsz != 0) {
+ kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
+ if (kbuf == NULL)
+ return (ENOMEM);
+ if (DRM_COPY_FROM_USER(kbuf, (void *)cmdbuf.buf,
+ cmdbuf.bufsz)) {
+ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+ return (EFAULT);
+ }
+ cmdbuf.buf = kbuf;
+ }
+
+ orig_nbox = cmdbuf.nbox;
+
+ if (dev_priv->microcode_version == UCODE_R300) {
+ int temp;
+ temp = r300_do_cp_cmdbuf(dev, fpriv, &cmdbuf);
+
+ if (orig_bufsz != 0)
+ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+
+ return (temp);
+ }
+
+ /* microcode_version != r300 */
+ while (cmdbuf.bufsz >= sizeof (header)) {
+
+ header.i = *(int *)(uintptr_t)cmdbuf.buf;
+ cmdbuf.buf += sizeof (header);
+ cmdbuf.bufsz -= sizeof (header);
+
+ switch (header.header.cmd_type) {
+ case RADEON_CMD_PACKET:
+ DRM_DEBUG("RADEON_CMD_PACKET\n");
+ if (radeon_emit_packets
+ (dev_priv, fpriv, header, &cmdbuf)) {
+ DRM_ERROR("radeon_emit_packets failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_SCALARS:
+ DRM_DEBUG("RADEON_CMD_SCALARS\n");
+ if (radeon_emit_scalars(dev_priv, header, &cmdbuf)) {
+ DRM_ERROR("radeon_emit_scalars failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_VECTORS:
+ DRM_DEBUG("RADEON_CMD_VECTORS\n");
+ if (radeon_emit_vectors(dev_priv, header, &cmdbuf)) {
+ DRM_ERROR("radeon_emit_vectors failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_DMA_DISCARD:
+ DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+ idx = header.dma.buf_idx;
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ goto err;
+ }
+
+ buf = dma->buflist[idx];
+ if (buf->filp != fpriv || buf->pending) {
+ DRM_ERROR("bad buffer %p %p %d\n",
+ buf->filp, fpriv, buf->pending);
+ goto err;
+ }
+
+ radeon_cp_discard_buffer(dev, buf);
+ break;
+
+ case RADEON_CMD_PACKET3:
+ DRM_DEBUG("RADEON_CMD_PACKET3\n");
+ if (radeon_emit_packet3(dev, fpriv, &cmdbuf)) {
+ DRM_ERROR("radeon_emit_packet3 failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_PACKET3_CLIP:
+ DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
+ if (radeon_emit_packet3_cliprect
+ (dev, fpriv, &cmdbuf, orig_nbox)) {
+ DRM_ERROR("radeon_emit_packet3_clip failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_SCALARS2:
+ DRM_DEBUG("RADEON_CMD_SCALARS2\n");
+ if (radeon_emit_scalars2(dev_priv, header, &cmdbuf)) {
+ DRM_ERROR("radeon_emit_scalars2 failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_WAIT:
+ DRM_DEBUG("RADEON_CMD_WAIT\n");
+ if (radeon_emit_wait(dev, header.wait.flags)) {
+ DRM_ERROR("radeon_emit_wait failed\n");
+ goto err;
+ }
+ break;
+ case RADEON_CMD_VECLINEAR:
+ DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
+ if (radeon_emit_veclinear(dev_priv, header, &cmdbuf)) {
+ DRM_ERROR("radeon_emit_veclinear failed\n");
+ goto err;
+ }
+ break;
+
+ default:
+ DRM_ERROR("bad cmd_type %d at %p\n",
+ header.header.cmd_type,
+ cmdbuf.buf - sizeof (header));
+ goto err;
+ }
+ }
+
+ if (orig_bufsz != 0)
+ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+
+ COMMIT_RING();
+ return (0);
+
+err:
+ if (orig_bufsz != 0)
+ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+ return (EINVAL);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_getparam(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_getparam_t param;
+ int value;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_getparam_32_t param32;
+
+ DRM_COPYFROM_WITH_RETURN(&param32,
+ (drm_radeon_getparam_32_t *)data, sizeof (param32));
+ param.param = param32.param;
+ param.value = (void *)(uintptr_t)param32.value;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&param,
+ (drm_radeon_getparam_t *)data, sizeof (param));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+ switch (param.param) {
+ case RADEON_PARAM_GART_BUFFER_OFFSET:
+ value = dev_priv->gart_buffers_offset;
+ break;
+ case RADEON_PARAM_LAST_FRAME:
+ dev_priv->stats.last_frame_reads++;
+ value = GET_SCRATCH(0);
+ break;
+ case RADEON_PARAM_LAST_DISPATCH:
+ value = GET_SCRATCH(1);
+ break;
+ case RADEON_PARAM_LAST_CLEAR:
+ dev_priv->stats.last_clear_reads++;
+ value = GET_SCRATCH(2);
+ break;
+ case RADEON_PARAM_IRQ_NR:
+ value = dev->irq;
+ break;
+ case RADEON_PARAM_GART_BASE:
+ value = dev_priv->gart_vm_start;
+ break;
+ case RADEON_PARAM_REGISTER_HANDLE:
+ value = dev_priv->mmio->offset;
+ break;
+ case RADEON_PARAM_STATUS_HANDLE:
+ value = dev_priv->ring_rptr_offset;
+ break;
+#ifndef __LP64__
+ /*
+ * This ioctl() doesn't work on 64-bit platforms because
+ * hw_lock is a pointer which can't fit into an int-sized
+	 * variable. According to Michel Dänzer, the ioctl() is
+ * only used on embedded platforms, so not supporting it
+ * shouldn't be a problem. If the same functionality is
+ * needed on 64-bit platforms, a new ioctl() would have
+ * to be added, so backwards-compatibility for the embedded
+ * platforms can be maintained. --davidm 4-Feb-2004.
+ */
+ case RADEON_PARAM_SAREA_HANDLE:
+ /* The lock is the first dword in the sarea. */
+ value = (long)dev->lock.hw_lock;
+ break;
+#endif
+ case RADEON_PARAM_GART_TEX_HANDLE:
+ value = dev_priv->gart_textures_offset;
+ break;
+ case RADEON_PARAM_SCRATCH_OFFSET:
+ if (!dev_priv->writeback_works)
+ return (EINVAL);
+ value = RADEON_SCRATCH_REG_OFFSET;
+ break;
+
+ case RADEON_PARAM_CARD_TYPE:
+ if (dev_priv->flags & RADEON_IS_PCIE)
+ value = RADEON_CARD_PCIE;
+ else if (dev_priv->flags & RADEON_IS_AGP)
+ value = RADEON_CARD_AGP;
+ else
+ value = RADEON_CARD_PCI;
+ break;
+ case RADEON_PARAM_VBLANK_CRTC:
+ value = radeon_vblank_crtc_get(dev);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (DRM_COPY_TO_USER(param.value, &value, sizeof (int))) {
+ DRM_ERROR("copy_to_user\n");
+ return (EFAULT);
+ }
+ return (0);
+}
+
+/*ARGSUSED*/
+static int radeon_cp_setparam(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_setparam_t sp;
+ struct drm_radeon_driver_file_fields *radeon_priv;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return (EINVAL);
+ }
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ drm_radeon_setparam_32_t sp32;
+
+ DRM_COPYFROM_WITH_RETURN(&sp32, (void *) data, sizeof (sp32));
+ sp.param = sp32.param;
+ sp.value = sp32.value;
+ } else {
+#endif
+ DRM_COPYFROM_WITH_RETURN(&sp, (void *) data, sizeof (sp));
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ switch (sp.param) {
+ case RADEON_SETPARAM_FB_LOCATION:
+ radeon_priv = fpriv->driver_priv;
+ radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
+ break;
+ case RADEON_SETPARAM_SWITCH_TILING:
+ if (sp.value == 0) {
+ DRM_DEBUG("color tiling disabled\n");
+ dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+ dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+ dev_priv->sarea_priv->tiling_enabled = 0;
+ } else if (sp.value == 1) {
+ DRM_DEBUG("color tiling enabled\n");
+ dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
+ dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
+ dev_priv->sarea_priv->tiling_enabled = 1;
+ }
+ break;
+ case RADEON_SETPARAM_PCIGART_LOCATION:
+ dev_priv->pcigart_offset = (unsigned long)sp.value;
+ break;
+ case RADEON_SETPARAM_NEW_MEMMAP:
+ dev_priv->new_memmap = (int)sp.value;
+ break;
+ case RADEON_SETPARAM_VBLANK_CRTC:
+ return (radeon_vblank_crtc_set(dev, sp.value));
+ default:
+ DRM_DEBUG("Invalid parameter %d\n", sp.param);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * When a client dies:
+ * - Check for and clean up flipped page state
+ * - Free any alloced GART memory.
+ * - Free any alloced radeon surfaces.
+ *
+ * DRM infrastructure takes care of reclaiming dma buffers.
+ */
+void
+radeon_driver_preclose(drm_device_t *dev, drm_file_t *filp)
+{
+ if (dev->dev_private) {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ if (dev_priv->page_flipping) {
+ (void) radeon_do_cleanup_pageflip(dev);
+ }
+ radeon_mem_release(filp, dev_priv->gart_heap);
+ radeon_mem_release(filp, dev_priv->fb_heap);
+ radeon_surfaces_release(filp, dev_priv);
+ }
+}
+
+void
+radeon_driver_lastclose(drm_device_t *dev)
+{
+ radeon_do_release(dev);
+}
+
+int
+radeon_driver_open(drm_device_t *dev, drm_file_t *filp_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_driver_file_fields *radeon_priv;
+
+ radeon_priv =
+ (struct drm_radeon_driver_file_fields *)
+ drm_alloc(sizeof (*radeon_priv), DRM_MEM_FILES);
+
+ if (!radeon_priv)
+		return (ENOMEM);
+
+ filp_priv->driver_priv = radeon_priv;
+
+ if (dev_priv)
+ radeon_priv->radeon_fb_delta = dev_priv->fb_location;
+ else
+ radeon_priv->radeon_fb_delta = 0;
+ return (0);
+}
+
+/*ARGSUSED*/
+void
+radeon_driver_postclose(drm_device_t *dev, drm_file_t *filp_priv)
+{
+ struct drm_radeon_driver_file_fields *radeon_priv =
+ filp_priv->driver_priv;
+
+	drm_free(radeon_priv, sizeof (*radeon_priv), DRM_MEM_FILES);
+}
+
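+/*
+ * Ioctl dispatch table, indexed by ioctl number: the drm core looks
+ * up radeon_ioctls[DRM_IOCTL_NR(cmd)] and checks the access flags
+ * (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY) before calling the handler.
+ */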
+drm_ioctl_desc_t radeon_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] =
+ {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_START)] =
+ {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] =
+ {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] =
+ {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] =
+ {radeon_cp_idle, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] =
+ {radeon_cp_resume, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_RESET)] =
+ {radeon_engine_reset, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] =
+ {radeon_fullscreen, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_SWAP)] =
+ {radeon_cp_swap, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] =
+ {radeon_cp_clear, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] =
+ {radeon_cp_vertex, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_INDICES)] =
+ {radeon_cp_indices, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] =
+ {radeon_cp_texture, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] =
+ {radeon_cp_stipple, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] =
+ {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] =
+ {radeon_cp_vertex2, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] =
+ {radeon_cp_cmdbuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] =
+ {radeon_cp_getparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_FLIP)] =
+ {radeon_cp_flip, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] =
+ {radeon_mem_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_FREE)] =
+ {radeon_mem_free, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] =
+ {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] =
+ {radeon_irq_emit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] =
+ {radeon_irq_wait, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] =
+ {radeon_cp_setparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] =
+ {radeon_surface_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] =
+ {radeon_surface_free, DRM_AUTH}
+};
+
+int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/usr/src/uts/intel/radeon/Makefile b/usr/src/uts/intel/radeon/Makefile
new file mode 100644
index 0000000..c7e5334
--- /dev/null
+++ b/usr/src/uts/intel/radeon/Makefile
@@ -0,0 +1,91 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# uts/intel/radeon/Makefile
+#
+# This makefile drives the production of the radeon graphics device
+# driver, which supports the DRI (Direct Rendering Infrastructure)
+# with the help of the drm common misc module.
+#
+# intel platform dependent
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = radeon
+OBJECTS = $(RADEON_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(RADEON_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+
+# radeon driver depends on drm
+INC_PATH += -I$(UTSBASE)/intel/io/drm -I$(UTSBASE)/common/io/drm
+
+#
+# dependency
+LDFLAGS += -dy -Nmisc/drm
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+#
+# Re-define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ